Commit | Line | Data |
---|---|---|
984263bc MD |
1 | /* |
2 | * Copyright (c) 1991, 1993 | |
3 | * The Regents of the University of California. All rights reserved. | |
4 | * | |
5 | * This code is derived from software contributed to Berkeley by | |
6 | * The Mach Operating System project at Carnegie-Mellon University. | |
7 | * | |
8 | * Redistribution and use in source and binary forms, with or without | |
9 | * modification, are permitted provided that the following conditions | |
10 | * are met: | |
11 | * 1. Redistributions of source code must retain the above copyright | |
12 | * notice, this list of conditions and the following disclaimer. | |
13 | * 2. Redistributions in binary form must reproduce the above copyright | |
14 | * notice, this list of conditions and the following disclaimer in the | |
15 | * documentation and/or other materials provided with the distribution. | |
16 | * 3. All advertising materials mentioning features or use of this software | |
17 | * must display the following acknowledgement: | |
18 | * This product includes software developed by the University of | |
19 | * California, Berkeley and its contributors. | |
20 | * 4. Neither the name of the University nor the names of its contributors | |
21 | * may be used to endorse or promote products derived from this software | |
22 | * without specific prior written permission. | |
23 | * | |
24 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND | |
25 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |
26 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |
27 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | |
28 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | |
29 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | |
30 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | |
31 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | |
32 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | |
33 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | |
34 | * SUCH DAMAGE. | |
35 | * | |
36 | * @(#)vm_map.h 8.9 (Berkeley) 5/17/95 | |
37 | * | |
38 | * | |
39 | * Copyright (c) 1987, 1990 Carnegie-Mellon University. | |
40 | * All rights reserved. | |
41 | * | |
42 | * Authors: Avadis Tevanian, Jr., Michael Wayne Young | |
43 | * | |
44 | * Permission to use, copy, modify and distribute this software and | |
45 | * its documentation is hereby granted, provided that both the copyright | |
46 | * notice and this permission notice appear in all copies of the | |
47 | * software, derivative works or modified versions, and any portions | |
48 | * thereof, and that both notices appear in supporting documentation. | |
49 | * | |
50 | * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" | |
51 | * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND | |
52 | * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. | |
53 | * | |
54 | * Carnegie Mellon requests users of this software to return to | |
55 | * | |
56 | * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU | |
57 | * School of Computer Science | |
58 | * Carnegie Mellon University | |
59 | * Pittsburgh PA 15213-3890 | |
60 | * | |
61 | * any improvements or extensions that they make and grant Carnegie the | |
62 | * rights to redistribute these changes. | |
63 | * | |
64 | * $FreeBSD: src/sys/vm/vm_map.h,v 1.54.2.5 2003/01/13 22:51:17 dillon Exp $ | |
65 | */ | |
66 | ||
67 | /* | |
68 | * Virtual memory map module definitions. | |
69 | */ | |
70 | ||
1bd40720 MD |
71 | #ifndef _VM_VM_MAP_H_ |
72 | #define _VM_VM_MAP_H_ | |
984263bc | 73 | |
1bd40720 MD |
74 | #ifndef _SYS_TYPES_H_ |
75 | #include <sys/types.h> | |
76 | #endif | |
7dc51496 | 77 | #ifdef _KERNEL |
87882e12 | 78 | #ifndef _SYS_KERNEL_H_ |
7dc51496 MD |
79 | #include <sys/kernel.h> /* ticks */ |
80 | #endif | |
87882e12 | 81 | #endif |
1bd40720 | 82 | #ifndef _SYS_TREE_H_ |
686dbf64 | 83 | #include <sys/tree.h> |
1bd40720 | 84 | #endif |
e3161323 MD |
85 | #ifndef _SYS_SYSREF_H_ |
86 | #include <sys/sysref.h> | |
87 | #endif | |
1bd40720 MD |
88 | #ifndef _SYS_LOCK_H_ |
89 | #include <sys/lock.h> | |
90 | #endif | |
afeabdca MD |
91 | #ifndef _SYS_VKERNEL_H_ |
92 | #include <sys/vkernel.h> | |
93 | #endif | |
1bd40720 MD |
94 | #ifndef _VM_VM_H_ |
95 | #include <vm/vm.h> | |
96 | #endif | |
97 | #ifndef _MACHINE_PMAP_H_ | |
98 | #include <machine/pmap.h> | |
99 | #endif | |
50a55c46 MD |
100 | #ifndef _VM_VM_OBJECT_H_ |
101 | #include <vm/vm_object.h> | |
102 | #endif | |
103 | #ifndef _SYS_NULL_H_ | |
104 | #include <sys/_null.h> | |
105 | #endif | |
1bd40720 | 106 | |
686dbf64 MD |
107 | struct vm_map_rb_tree; |
108 | RB_PROTOTYPE(vm_map_rb_tree, vm_map_entry, rb_entry, rb_vm_map_compare); | |
109 | ||
984263bc MD |
/*
 *	Types defined:
 *
 *	vm_map_t		the high-level address map data structure.
 *	vm_map_entry_t		an entry in an address map.
 */

typedef u_int vm_flags_t;	/* vm_map.flags (MAP_WIREFUTURE, etc) */
typedef u_int vm_eflags_t;	/* vm_map_entry.eflags (MAP_ENTRY_*) */
/*
 *	Objects which live in maps may be either VM objects, or
 *	another map (called a "sharing map") which denotes read-write
 *	sharing with other maps.
 */
union vm_map_object {
	struct vm_object *vm_object;	/* the referenced VM object */
	struct vm_map *sub_map;		/* belongs to another map */
};
129 | ||
afeabdca MD |
/*
 * Per-entry auxiliary data.  Which member is valid depends on the
 * entry's map type (see the struct vm_map_entry comment below).
 */
union vm_map_aux {
	vm_offset_t avail_ssize;	/* amt can grow if this is a stack */
	vpte_t master_pde;		/* virtual page table root */
};
134 | ||
984263bc MD |
/*
 *	Address map entries consist of start and end addresses,
 *	a VM object (or sharing map) and offset into that object,
 *	and user-exported inheritance and protection information.
 *	Also included is control information for virtual copy operations.
 *
 *	When used with MAP_STACK, aux.avail_ssize is used to determine the
 *	limits of stack growth.
 *
 *	When used with VM_MAPTYPE_VPAGETABLE, aux stores the
 *	page directory index.
 */
struct vm_map_entry {
	struct vm_map_entry *prev;	/* previous entry */
	struct vm_map_entry *next;	/* next entry */
	RB_ENTRY(vm_map_entry) rb_entry; /* address-sorted RB tree linkage */
	vm_offset_t start;		/* start address */
	vm_offset_t end;		/* end address */
	union vm_map_aux aux;		/* auxiliary data */
	union vm_map_object object;	/* object I point to */
	vm_ooffset_t offset;		/* offset into object */
	vm_eflags_t eflags;		/* map entry flags (MAP_ENTRY_*) */
	vm_maptype_t maptype;		/* type of VM mapping */
	vm_prot_t protection;		/* protection code */
	vm_prot_t max_protection;	/* maximum protection */
	vm_inherit_t inheritance;	/* inheritance on fork */
	int wired_count;		/* can be paged if = 0 */
};
163 | ||
/*
 * vm_map_entry eflags (vm_eflags_t) bits.
 */
#define MAP_ENTRY_NOSYNC	0x0001
#define MAP_ENTRY_STACK		0x0002
#define MAP_ENTRY_COW		0x0004
#define MAP_ENTRY_NEEDS_COPY	0x0008
#define MAP_ENTRY_NOFAULT	0x0010
#define MAP_ENTRY_USER_WIRED	0x0020

/* Access-pattern advice, stored in eflags under MAP_ENTRY_BEHAV_MASK */
#define MAP_ENTRY_BEHAV_NORMAL	0x0000	/* default behavior */
#define MAP_ENTRY_BEHAV_SEQUENTIAL 0x0040 /* expect sequential access */
#define MAP_ENTRY_BEHAV_RANDOM	0x0080	/* expect random access */
#define MAP_ENTRY_BEHAV_RESERVED 0x00C0	/* future use */

#define MAP_ENTRY_BEHAV_MASK	0x00C0

#define MAP_ENTRY_IN_TRANSITION	0x0100	/* entry being changed */
#define MAP_ENTRY_NEEDS_WAKEUP	0x0200	/* waiters are in transition */
#define MAP_ENTRY_NOCOREDUMP	0x0400	/* don't include in a core */
#define MAP_ENTRY_KSTACK	0x0800	/* guarded kernel stack */

/*
 * flags for vm_map_[un]clip_range()
 */
#define MAP_CLIP_NO_HOLES	0x0001
187 | ||
a108bf71 MD |
188 | /* |
189 | * This reserve count for vm_map_entry_reserve() should cover all nominal | |
190 | * single-insertion operations, including any necessary clipping. | |
191 | */ | |
192 | #define MAP_RESERVE_COUNT 4 | |
193 | #define MAP_RESERVE_SLOP 32 | |
194 | ||
984263bc MD |
195 | static __inline u_char |
196 | vm_map_entry_behavior(struct vm_map_entry *entry) | |
197 | { | |
198 | return entry->eflags & MAP_ENTRY_BEHAV_MASK; | |
199 | } | |
200 | ||
201 | static __inline void | |
202 | vm_map_entry_set_behavior(struct vm_map_entry *entry, u_char behavior) | |
203 | { | |
204 | entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) | | |
205 | (behavior & MAP_ENTRY_BEHAV_MASK); | |
206 | } | |
207 | ||
/*
 * Maps are doubly-linked lists of map entries, kept sorted by address.
 * A single hint is provided to start searches again from the last
 * successful search, insertion, or removal.
 *
 * NOTE: The lock structure cannot be the first element of vm_map
 *	 because this can result in a running lockup between two or more
 *	 system processes trying to kmem_alloc_wait() due to kmem_alloc_wait()
 *	 and free tsleep/waking up 'map' and the underlying lockmgr also
 *	 sleeping and waking up on 'map'.  The lockup occurs when the map fills
 *	 up.  The 'exec' map, for example.
 *
 * NOTE: The vm_map structure can be hard-locked with the lockmgr lock
 *	 or soft-serialized with the token, or both.
 */
struct vm_map {
	struct vm_map_entry header;	/* List of entries */
	RB_HEAD(vm_map_rb_tree, vm_map_entry) rb_root; /* entries by address */
	struct lock lock;		/* Lock for map data */
	int nentries;			/* Number of entries */
	vm_size_t size;			/* virtual size */
	u_char system_map;		/* Am I a system map? */
	vm_map_entry_t hint;		/* hint for quick lookups */
	unsigned int timestamp;		/* Version number */
	vm_map_entry_t first_free;	/* First free space hint */
	vm_flags_t flags;		/* flags for this vm_map */
	struct pmap *pmap;		/* Physical map */
	u_int president_cache;		/* Remember president count */
	u_int president_ticks;		/* Save ticks for cache */
	struct lwkt_token token;	/* Soft serializer */
#define	min_offset	header.start	/* map start addr aliases header */
#define max_offset	header.end	/* map end addr aliases header */
};
241 | ||
69e16e2a VS |
242 | /* |
243 | * vm_flags_t values | |
244 | */ | |
54341a3b | 245 | #define MAP_WIREFUTURE 0x0001 /* wire all future pages */ |
69e16e2a | 246 | |
984263bc MD |
/*
 * Shareable process virtual address space.
 *
 * Refd pointers from vmresident, proc
 */
struct vmspace {
	struct vm_map vm_map;	/* VM address map */
	struct pmap vm_pmap;	/* private physical map */
	int vm_flags;		/* VMSPACE_EXIT* flags */
	caddr_t vm_shm;		/* SYS5 shared memory private data XXX */
/* we copy from vm_startcopy to the end of the structure on fork */
#define vm_startcopy vm_rssize
	segsz_t vm_rssize;	/* current resident set size in pages */
	segsz_t vm_swrss;	/* resident set size before last swap */
	segsz_t vm_tsize;	/* text size (pages) XXX */
	segsz_t vm_dsize;	/* data size (pages) XXX */
	segsz_t vm_ssize;	/* stack size (pages) */
	caddr_t vm_taddr;	/* user virtual address of text XXX */
	caddr_t vm_daddr;	/* user virtual address of data XXX */
	caddr_t vm_maxsaddr;	/* user VA at max stack growth */
	caddr_t vm_minsaddr;	/* user VA at max stack growth */
#define vm_endcopy	vm_exitingcnt
	int	vm_exitingcnt;	/* exit/wait context reaping */
	int	vm_unused01;	/* for future fields */
	int	vm_pagesupply;
	u_int	vm_holdcount;
	void	*vm_unused02;	/* for future fields */
	struct sysref vm_sysref;	/* sysref, refcnt, etc */
};
276 | ||
a2ee730d MD |
277 | #define VMSPACE_EXIT1 0x0001 /* partial exit */ |
278 | #define VMSPACE_EXIT2 0x0002 /* full exit */ | |
279 | ||
29802dbb MD |
/*
 * Resident executable holding structure.  A user program can take a snapshot
 * of just its VM address space (typically done just after dynamic link
 * libraries have completed loading) and register it as a resident
 * executable associated with the program binary's vnode, which is also
 * locked into memory.  Future execs of the vnode will start with a copy
 * of the resident vmspace instead of running the binary from scratch,
 * avoiding both the kernel ELF loader *AND* all shared library mapping and
 * relocation code, and will call a different entry point (the stack pointer
 * is reset to the top of the stack) supplied when the vmspace was registered.
 */
struct vmresident {
	struct vnode	*vr_vnode;	/* associated vnode */
	TAILQ_ENTRY(vmresident) vr_link; /* linked list of res sts */
	struct vmspace	*vr_vmspace;	/* vmspace to fork */
	intptr_t	vr_entry_addr;	/* registered entry point */
	struct sysentvec *vr_sysent;	/* system call vects */
	int		vr_id;		/* registration id */
	int		vr_refs;	/* temporary refs */
};
300 | ||
d5fa53d4 | 301 | #ifdef _KERNEL |
984263bc MD |
302 | /* |
303 | * Macros: vm_map_lock, etc. | |
304 | * Function: | |
305 | * Perform locking on the data portion of a map. Note that | |
306 | * these macros mimic procedure calls returning void. The | |
307 | * semicolon is supplied by the user of these macros, not | |
308 | * by the macros themselves. The macros can safely be used | |
309 | * as unbraced elements in a higher level statement. | |
310 | */ | |
311 | ||
46754a20 MD |
312 | #define ASSERT_VM_MAP_LOCKED(map) KKASSERT(lockowned(&(map)->lock)) |
313 | ||
984263bc MD |
314 | #ifdef DIAGNOSTIC |
315 | /* #define MAP_LOCK_DIAGNOSTIC 1 */ | |
316 | #ifdef MAP_LOCK_DIAGNOSTIC | |
317 | #define vm_map_lock(map) \ | |
318 | do { \ | |
086c1d7e | 319 | kprintf ("locking map LK_EXCLUSIVE: 0x%x\n", map); \ |
df4f70a6 | 320 | if (lockmgr(&(map)->lock, LK_EXCLUSIVE) != 0) { \ |
984263bc MD |
321 | panic("vm_map_lock: failed to get lock"); \ |
322 | } \ | |
323 | (map)->timestamp++; \ | |
324 | } while(0) | |
325 | #else | |
326 | #define vm_map_lock(map) \ | |
327 | do { \ | |
df4f70a6 | 328 | if (lockmgr(&(map)->lock, LK_EXCLUSIVE) != 0) { \ |
984263bc MD |
329 | panic("vm_map_lock: failed to get lock"); \ |
330 | } \ | |
331 | (map)->timestamp++; \ | |
332 | } while(0) | |
333 | #endif | |
334 | #else | |
335 | #define vm_map_lock(map) \ | |
336 | do { \ | |
df4f70a6 | 337 | lockmgr(&(map)->lock, LK_EXCLUSIVE); \ |
984263bc MD |
338 | (map)->timestamp++; \ |
339 | } while(0) | |
340 | #endif /* DIAGNOSTIC */ | |
341 | ||
342 | #if defined(MAP_LOCK_DIAGNOSTIC) | |
343 | #define vm_map_unlock(map) \ | |
344 | do { \ | |
086c1d7e | 345 | kprintf ("locking map LK_RELEASE: 0x%x\n", map); \ |
df4f70a6 | 346 | lockmgr(&(map)->lock, LK_RELEASE); \ |
984263bc MD |
347 | } while (0) |
348 | #define vm_map_lock_read(map) \ | |
349 | do { \ | |
086c1d7e | 350 | kprintf ("locking map LK_SHARED: 0x%x\n", map); \ |
df4f70a6 | 351 | lockmgr(&(map)->lock, LK_SHARED); \ |
984263bc MD |
352 | } while (0) |
353 | #define vm_map_unlock_read(map) \ | |
354 | do { \ | |
086c1d7e | 355 | kprintf ("locking map LK_RELEASE: 0x%x\n", map); \ |
df4f70a6 | 356 | lockmgr(&(map)->lock, LK_RELEASE); \ |
984263bc MD |
357 | } while (0) |
358 | #else | |
359 | #define vm_map_unlock(map) \ | |
df4f70a6 | 360 | lockmgr(&(map)->lock, LK_RELEASE) |
984263bc | 361 | #define vm_map_lock_read(map) \ |
df4f70a6 | 362 | lockmgr(&(map)->lock, LK_SHARED) |
984263bc | 363 | #define vm_map_unlock_read(map) \ |
df4f70a6 | 364 | lockmgr(&(map)->lock, LK_RELEASE) |
984263bc MD |
365 | #endif |
366 | ||
87882e12 MD |
367 | #define vm_map_lock_read_try(map) \ |
368 | lockmgr(&(map)->lock, LK_SHARED | LK_NOWAIT) | |
369 | ||
625a2937 MD |
370 | static __inline__ int |
371 | vm_map_lock_read_to(vm_map_t map) | |
372 | { | |
373 | int error; | |
374 | ||
375 | #if defined(MAP_LOCK_DIAGNOSTIC) | |
376 | kprintf ("locking map LK_SHARED: 0x%x\n", map); | |
377 | #endif | |
378 | error = lockmgr(&(map)->lock, LK_SHARED | LK_TIMELOCK); | |
379 | return error; | |
380 | } | |
381 | ||
984263bc | 382 | static __inline__ int |
df4f70a6 | 383 | vm_map_lock_upgrade(vm_map_t map) { |
984263bc MD |
384 | int error; |
385 | #if defined(MAP_LOCK_DIAGNOSTIC) | |
086c1d7e | 386 | kprintf("locking map LK_EXCLUPGRADE: 0x%x\n", map); |
984263bc | 387 | #endif |
df4f70a6 | 388 | error = lockmgr(&map->lock, LK_EXCLUPGRADE); |
984263bc MD |
389 | if (error == 0) |
390 | map->timestamp++; | |
391 | return error; | |
392 | } | |
393 | ||
984263bc MD |
394 | #if defined(MAP_LOCK_DIAGNOSTIC) |
395 | #define vm_map_lock_downgrade(map) \ | |
396 | do { \ | |
086c1d7e | 397 | kprintf ("locking map LK_DOWNGRADE: 0x%x\n", map); \ |
df4f70a6 | 398 | lockmgr(&(map)->lock, LK_DOWNGRADE); \ |
984263bc MD |
399 | } while (0) |
400 | #else | |
401 | #define vm_map_lock_downgrade(map) \ | |
df4f70a6 | 402 | lockmgr(&(map)->lock, LK_DOWNGRADE) |
984263bc MD |
403 | #endif |
404 | ||
d5fa53d4 HP |
405 | #endif /* _KERNEL */ |
406 | ||
984263bc MD |
407 | /* |
408 | * Functions implemented as macros | |
409 | */ | |
410 | #define vm_map_min(map) ((map)->min_offset) | |
411 | #define vm_map_max(map) ((map)->max_offset) | |
412 | #define vm_map_pmap(map) ((map)->pmap) | |
413 | ||
4a28fe22 MD |
414 | /* |
415 | * Must not block | |
416 | */ | |
984263bc MD |
417 | static __inline struct pmap * |
418 | vmspace_pmap(struct vmspace *vmspace) | |
419 | { | |
420 | return &vmspace->vm_pmap; | |
421 | } | |
422 | ||
b12defdc MD |
/*
 * Return the resident page count of the vmspace's pmap.
 *
 * Caller must hold the vmspace->vm_map.token
 */
static __inline long
vmspace_resident_count(struct vmspace *vmspace)
{
	return pmap_resident_count(vmspace_pmap(vmspace));
}
431 | ||
87882e12 MD |
/*
 * Calculates the proportional RSS and returning the
 * accrued result.  This is a loose value for statistics/display
 * purposes only and will only be updated if we can acquire
 * a non-blocking map lock.
 *
 * (used by userland or the kernel)
 */
static __inline u_int
vmspace_president_count(struct vmspace *vmspace)
{
	vm_map_t map = &vmspace->vm_map;
	vm_map_entry_t cur;
	vm_object_t object;
	u_int count = 0;
	u_int n;

#ifdef _KERNEL
	/*
	 * Return the cached value when it was computed within the current
	 * second, or when the shared map lock cannot be acquired without
	 * blocking (vm_map_lock_read_try returns non-zero on failure).
	 */
	if (map->president_ticks == ticks / hz || vm_map_lock_read_try(map))
		return(map->president_cache);
#endif

	/* Walk every entry in the map, skipping the sentinel header */
	for (cur = map->header.next; cur != &map->header; cur = cur->next) {
		switch(cur->maptype) {
		case VM_MAPTYPE_NORMAL:
		case VM_MAPTYPE_VPAGETABLE:
			if ((object = cur->object.vm_object) == NULL)
				break;
			/* only anonymous/swap-backed objects are counted */
			if (object->type != OBJT_DEFAULT &&
			    object->type != OBJT_SWAP) {
				break;
			}
			/*
			 * synchronize non-zero case, contents of field
			 * can change at any time due to pmap ops.
			 * Pages are divided by the number of mappers (n)
			 * to get the proportional share.
			 */
			if ((n = object->agg_pv_list_count) != 0) {
#ifdef _KERNEL
				cpu_ccfence();
#endif
				count += object->resident_page_count / n;
			}
			break;
		default:
			break;
		}
	}
#ifdef _KERNEL
	/* refresh the per-second cache, then drop the shared lock */
	map->president_cache = count;
	map->president_ticks = ticks / hz;
	vm_map_unlock_read(map);
#endif

	return(count);
}
487 | ||
a108bf71 MD |
488 | /* |
489 | * Number of kernel maps and entries to statically allocate, required | |
490 | * during boot to bootstrap the VM system. | |
491 | */ | |
984263bc | 492 | #define MAX_KMAP 10 |
8e5ea5f7 | 493 | #define MAX_MAPENT 2048 /* required to support up to 64 cpus */ |
984263bc MD |
494 | |
495 | /* | |
496 | * Copy-on-write flags for vm_map operations | |
497 | */ | |
498 | #define MAP_UNUSED_01 0x0001 | |
499 | #define MAP_COPY_ON_WRITE 0x0002 | |
500 | #define MAP_NOFAULT 0x0004 | |
501 | #define MAP_PREFAULT 0x0008 | |
502 | #define MAP_PREFAULT_PARTIAL 0x0010 | |
503 | #define MAP_DISABLE_SYNCER 0x0020 | |
c809941b | 504 | #define MAP_IS_STACK 0x0040 |
e40cfbd7 | 505 | #define MAP_IS_KSTACK 0x0080 |
984263bc MD |
506 | #define MAP_DISABLE_COREDUMP 0x0100 |
507 | #define MAP_PREFAULT_MADVISE 0x0200 /* from (user) madvise request */ | |
508 | ||
509 | /* | |
510 | * vm_fault option flags | |
511 | */ | |
0035dca9 MD |
512 | #define VM_FAULT_NORMAL 0x00 /* Nothing special */ |
513 | #define VM_FAULT_CHANGE_WIRING 0x01 /* Change the wiring as appropriate */ | |
514 | #define VM_FAULT_USER_WIRE 0x02 /* Likewise, but for user purposes */ | |
1b9d3514 | 515 | #define VM_FAULT_BURST 0x04 /* Burst fault can be done */ |
0035dca9 | 516 | #define VM_FAULT_DIRTY 0x08 /* Dirty the page */ |
9f3543c6 | 517 | #define VM_FAULT_UNSWAP 0x10 /* Remove backing store from the page */ |
54341a3b | 518 | #define VM_FAULT_BURST_QUICK 0x20 /* Special case shared vm_object */ |
0035dca9 | 519 | #define VM_FAULT_WIRE_MASK (VM_FAULT_CHANGE_WIRING|VM_FAULT_USER_WIRE) |
984263bc MD |
520 | |
521 | #ifdef _KERNEL | |
e3161323 MD |
522 | |
523 | extern struct sysref_class vmspace_sysref_class; | |
524 | ||
46754a20 MD |
525 | boolean_t vm_map_check_protection (vm_map_t, vm_offset_t, vm_offset_t, |
526 | vm_prot_t, boolean_t); | |
984263bc | 527 | struct pmap; |
41a01a4d | 528 | struct globaldata; |
53025830 | 529 | void vm_map_entry_allocate_object(vm_map_entry_t); |
41a01a4d | 530 | void vm_map_entry_reserve_cpu_init(struct globaldata *gd); |
a108bf71 MD |
531 | int vm_map_entry_reserve(int); |
532 | int vm_map_entry_kreserve(int); | |
533 | void vm_map_entry_release(int); | |
534 | void vm_map_entry_krelease(int); | |
e4846942 | 535 | vm_map_t vm_map_create (vm_map_t, struct pmap *, vm_offset_t, vm_offset_t); |
a108bf71 | 536 | int vm_map_delete (vm_map_t, vm_offset_t, vm_offset_t, int *); |
1b874851 | 537 | int vm_map_find (vm_map_t, vm_object_t, vm_ooffset_t, |
9388fcaa MD |
538 | vm_offset_t *, vm_size_t, vm_size_t, |
539 | boolean_t, vm_maptype_t, | |
1b874851 MD |
540 | vm_prot_t, vm_prot_t, |
541 | int); | |
9388fcaa | 542 | int vm_map_findspace (vm_map_t, vm_offset_t, vm_size_t, vm_size_t, |
c809941b | 543 | int, vm_offset_t *); |
911e30e2 | 544 | vm_offset_t vm_map_hint(struct proc *, vm_offset_t, vm_prot_t); |
1388df65 | 545 | int vm_map_inherit (vm_map_t, vm_offset_t, vm_offset_t, vm_inherit_t); |
e4846942 | 546 | void vm_map_init (struct vm_map *, vm_offset_t, vm_offset_t, pmap_t); |
1b874851 MD |
547 | int vm_map_insert (vm_map_t, int *, vm_object_t, vm_ooffset_t, |
548 | vm_offset_t, vm_offset_t, | |
549 | vm_maptype_t, | |
550 | vm_prot_t, vm_prot_t, | |
551 | int); | |
1388df65 RG |
552 | int vm_map_lookup (vm_map_t *, vm_offset_t, vm_prot_t, vm_map_entry_t *, vm_object_t *, |
553 | vm_pindex_t *, vm_prot_t *, boolean_t *); | |
a108bf71 | 554 | void vm_map_lookup_done (vm_map_t, vm_map_entry_t, int); |
1388df65 | 555 | boolean_t vm_map_lookup_entry (vm_map_t, vm_offset_t, vm_map_entry_t *); |
e1359933 | 556 | int vm_map_wire (vm_map_t, vm_offset_t, vm_offset_t, int); |
cde87949 | 557 | int vm_map_unwire (vm_map_t, vm_offset_t, vm_offset_t, boolean_t); |
1388df65 RG |
558 | int vm_map_clean (vm_map_t, vm_offset_t, vm_offset_t, boolean_t, boolean_t); |
559 | int vm_map_protect (vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t); | |
560 | int vm_map_remove (vm_map_t, vm_offset_t, vm_offset_t); | |
561 | void vm_map_startup (void); | |
562 | int vm_map_submap (vm_map_t, vm_offset_t, vm_offset_t, vm_map_t); | |
afeabdca | 563 | int vm_map_madvise (vm_map_t, vm_offset_t, vm_offset_t, int, off_t); |
a108bf71 | 564 | void vm_map_simplify_entry (vm_map_t, vm_map_entry_t, int *); |
1388df65 RG |
565 | void vm_init2 (void); |
566 | int vm_uiomove (vm_map_t, vm_object_t, off_t, int, vm_offset_t, int *); | |
c809941b | 567 | int vm_map_stack (vm_map_t, vm_offset_t, vm_size_t, int, |
85d25bcf | 568 | vm_prot_t, vm_prot_t, int); |
1388df65 RG |
569 | int vm_map_growstack (struct proc *p, vm_offset_t addr); |
570 | int vmspace_swap_count (struct vmspace *vmspace); | |
20479584 | 571 | int vmspace_anonymous_count (struct vmspace *vmspace); |
a108bf71 | 572 | void vm_map_set_wired_quick(vm_map_t map, vm_offset_t addr, vm_size_t size, int *); |
ff13bc52 VS |
573 | void vm_map_transition_wait(vm_map_t map); |
574 | ||
d63ed24b MD |
575 | #if defined(__amd64__) && defined(_KERNEL_VIRTUAL) |
576 | int vkernel_module_memory_alloc(vm_offset_t *, size_t); | |
577 | void vkernel_module_memory_free(vm_offset_t, size_t); | |
578 | #endif | |
984263bc MD |
579 | |
580 | #endif | |
1bd40720 | 581 | #endif /* _VM_VM_MAP_H_ */ |