/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2003-2019 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.h	8.9 (Berkeley) 5/17/95
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * Virtual memory map module definitions.  The vm_map houses the pmap
 * structure which controls the mmu context for a process.
 */
#ifndef _VM_VM_MAP_H_
#define	_VM_VM_MAP_H_

#ifndef _SYS_TYPES_H_
#include <sys/types.h>
#endif
#ifdef _KERNEL
#ifndef _SYS_KERNEL_H_
#include <sys/kernel.h>	/* ticks */
#endif
#endif
#ifndef _SYS_TREE_H_
#include <sys/tree.h>
#endif
#ifndef _SYS_LOCK_H_
#include <sys/lock.h>
#endif
#ifndef _SYS_VKERNEL_H_
#include <sys/vkernel.h>
#endif
#ifndef _VM_VM_H_
#include <vm/vm.h>
#endif
#ifndef _MACHINE_PMAP_H_
#include <machine/pmap.h>
#endif
#ifndef _VM_VM_OBJECT_H_
#include <vm/vm_object.h>
#endif
#ifndef _SYS_NULL_H_
#include <sys/_null.h>
#endif

struct vm_map_rb_tree;
RB_PROTOTYPE(vm_map_rb_tree, vm_map_entry, rb_entry, rb_vm_map_compare);

typedef u_int vm_flags_t;
typedef u_int vm_eflags_t;

/*
 * Aux structure depends on map type and/or flags.
 */
union vm_map_aux {
	vm_offset_t avail_ssize;	/* amt can grow if this is a stack */
	vpte_t master_pde;		/* virtual page table root */
	struct cdev *dev;
	void	*map_aux;
};

/*
 * vm_map_entry identifiers, used as a debugging aid
 */
typedef enum {
	VM_SUBSYS_UNKNOWN,
	VM_SUBSYS_KMALLOC,
	VM_SUBSYS_STACK,
	VM_SUBSYS_IMGACT,
	VM_SUBSYS_EFI,
	VM_SUBSYS_RESERVED,
	VM_SUBSYS_INIT,
	VM_SUBSYS_PIPE,
	VM_SUBSYS_PROC,
	VM_SUBSYS_SHMEM,
	VM_SUBSYS_SYSMAP,
	VM_SUBSYS_MMAP,
	VM_SUBSYS_BRK,
	VM_SUBSYS_BOGUS,
	VM_SUBSYS_BUF,
	VM_SUBSYS_BUFDATA,
	VM_SUBSYS_GD,
	VM_SUBSYS_IPIQ,
	VM_SUBSYS_PVENTRY,
	VM_SUBSYS_PML4,
	VM_SUBSYS_MAPDEV,
	VM_SUBSYS_ZALLOC,

	VM_SUBSYS_DM,
	VM_SUBSYS_CONTIG,
	VM_SUBSYS_DRM,
	VM_SUBSYS_DRM_GEM,
	VM_SUBSYS_DRM_SCAT,
	VM_SUBSYS_DRM_VMAP,
	VM_SUBSYS_DRM_TTM,
	VM_SUBSYS_HAMMER,
	VM_SUBSYS_NVMM,

	VM_SUBSYS_VMPGHASH,

	VM_SUBSYS_LIMIT		/* end of list */
} vm_subsys_t;

#define UKSMAPOP_ADD		1
#define UKSMAPOP_REM		2
#define UKSMAPOP_FAULT		3

/*
 * vm_map backing structure for specifying multiple backings.  This
 * structure is NOT shared across pmaps but may be shared within a pmap.
 * The offset is cumulatively added from its parent, allowing easy splits
 * and merges.
 */
struct vm_map_backing {
	vm_offset_t	start;		/* start address in pmap */
	vm_offset_t	end;		/* end address in pmap */
	struct pmap	*pmap;		/* for vm_object extents */

	struct vm_map_backing	*backing_ba;	/* backing store */

	/*
	 * Keep track of extents, typically via a vm_object but for uksmaps
	 * this can also be based off of a process or lwp.
	 */
	TAILQ_ENTRY(vm_map_backing) entry;

	/*
	 * A vm_map_entry may reference an object, a submap, a uksmap, or a
	 * direct user-kernel shared map.
	 */
	union {
		struct vm_object *object;	/* vm_object */
		struct vm_map	*sub_map;	/* belongs to another map */
		int		(*uksmap)(struct vm_map_backing *entry,
					  int op,
					  struct cdev *dev,
					  vm_page_t fake);
		void		*map_object;	/* generic */
	};
	void		*aux_info;

	/*
	 * The offset field typically represents the absolute offset in the
	 * object, but can have other meanings for uksmaps.
	 */
	vm_ooffset_t	offset;
	uint32_t	flags;
	uint32_t	backing_count;	/* #entries backing us */
};

typedef struct vm_map_backing *vm_map_backing_t;

#define VM_MAP_BACK_EXCL_HEUR	0x00000001U

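/*
 * Illustrative sketch, not part of the API: a fault-style lookup walks
 * from an entry's innermost vm_map_backing toward the terminal backing
 * store via backing_ba.  The helper name below is hypothetical, and the
 * object member is only meaningful for object-backed map types.
 *
 *	static struct vm_object *
 *	find_terminal_object(vm_map_backing_t ba)
 *	{
 *		while (ba->backing_ba)
 *			ba = ba->backing_ba;
 *		return (ba->object);
 *	}
 */
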
/*
 * Address map entries consist of start and end addresses, a VM object
 * (or sharing map) and offset into that object, and user-exported
 * inheritance and protection information.  Also included is control
 * information for virtual copy operations.
 *
 * The object information is now encapsulated in a vm_map_backing
 * structure which contains the backing store chain, if any.  This
 * structure is NOT shared.
 *
 * When used with MAP_STACK, avail_ssize is used to determine the limits
 * of stack growth.
 */
struct vm_map_entry {
	RB_ENTRY(vm_map_entry) rb_entry;
	union vm_map_aux aux;		/* auxiliary data */
	struct vm_map_backing ba;	/* backing object chain */
	vm_eflags_t	eflags;		/* map entry flags */
	vm_maptype_t	maptype;	/* type of VM mapping */
	vm_prot_t	protection;	/* protection code */
	vm_prot_t	max_protection;	/* maximum protection */
	vm_inherit_t	inheritance;	/* inheritance */
	int		wired_count;	/* can be paged if = 0 */
	vm_subsys_t	id;		/* subsystem id */
};

typedef struct vm_map_entry *vm_map_entry_t;

#define MAPENT_FREELIST(ent)	(ent)->rb_entry.rbe_left

#define MAP_ENTRY_NOSYNC		0x0001
#define MAP_ENTRY_STACK			0x0002
#define MAP_ENTRY_COW			0x0004
#define MAP_ENTRY_NEEDS_COPY		0x0008
#define MAP_ENTRY_NOFAULT		0x0010
#define MAP_ENTRY_USER_WIRED		0x0020

#define MAP_ENTRY_BEHAV_NORMAL		0x0000	/* default behavior */
#define MAP_ENTRY_BEHAV_SEQUENTIAL	0x0040	/* expect sequential access */
#define MAP_ENTRY_BEHAV_RANDOM		0x0080	/* expect random access */
#define MAP_ENTRY_BEHAV_RESERVED	0x00C0	/* future use */

#define MAP_ENTRY_BEHAV_MASK		0x00C0

#define MAP_ENTRY_IN_TRANSITION		0x0100	/* entry being changed */
#define MAP_ENTRY_NEEDS_WAKEUP		0x0200	/* waiters in transition */
#define MAP_ENTRY_NOCOREDUMP		0x0400	/* don't include in a core */
#define MAP_ENTRY_KSTACK		0x0800	/* guarded kernel stack */

/*
 * flags for vm_map_[un]clip_range()
 */
#define MAP_CLIP_NO_HOLES		0x0001

/*
 * This reserve count for vm_map_entry_reserve() should cover all nominal
 * single-insertion operations, including any necessary clipping.
 */
#define MAP_RESERVE_COUNT	4
#define MAP_RESERVE_SLOP	512
#define MAP_RESERVE_HYST	(MAP_RESERVE_SLOP - MAP_RESERVE_SLOP / 8)

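/*
 * Typical usage sketch (an assumption based on the prototypes declared
 * later in this file, not a normative sequence): reserve entries before
 * taking the map lock so that clipping inside the critical section cannot
 * block on allocation, then return the unused reserve afterwards.
 *
 *	int count;
 *
 *	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
 *	vm_map_lock(map);
 *	...insert and/or clip entries...
 *	vm_map_unlock(map);
 *	vm_map_entry_release(count);
 */
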
/*
 * vm_map_lookup wflags
 */
#define FW_WIRED	0x0001
#define FW_DIDCOW	0x0002

static __inline u_char
vm_map_entry_behavior(struct vm_map_entry *entry)
{
	return entry->eflags & MAP_ENTRY_BEHAV_MASK;
}

static __inline void
vm_map_entry_set_behavior(struct vm_map_entry *entry, u_char behavior)
{
	entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
			(behavior & MAP_ENTRY_BEHAV_MASK);
}

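/*
 * Example (illustrative): madvise()-style code would record an access
 * pattern hint and later test it as follows.
 *
 *	vm_map_entry_set_behavior(entry, MAP_ENTRY_BEHAV_SEQUENTIAL);
 *	if (vm_map_entry_behavior(entry) == MAP_ENTRY_BEHAV_SEQUENTIAL)
 *		...favor read-ahead heuristics...
 */
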
/*
 * Virtual address range interlock
 *
 * Used by MADV_INVAL in vm_map.c, but it is unclear whether we still
 * need it with the vpagetable support removed.
 */
struct vm_map_ilock {
	struct vm_map_ilock *next;
	int	flags;
	vm_offset_t ran_beg;
	vm_offset_t ran_end;	/* non-inclusive */
};

#define ILOCK_WAITING		0x00000001

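/*
 * Usage sketch (an assumption mirroring the vm_map_interlock() and
 * vm_map_deinterlock() prototypes declared later in this file): the
 * ilock is stack-allocated and covers [start, end).
 *
 *	struct vm_map_ilock ilock;
 *
 *	vm_map_interlock(map, &ilock, start, end);
 *	...operate on the interlocked virtual address range...
 *	vm_map_deinterlock(map, &ilock);
 */
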
/*
 * Hinting mechanism used by vm_map_findspace() to figure out where to start
 * an iteration looking for a hole big enough for the requested allocation.
 * This can be important in situations where large amounts of kernel memory
 * are being managed.  For example, if the system is managing tens of
 * thousands of processes or threads.
 *
 * If a hint is present it guarantees that no compatible hole exists prior
 * to the (start) address.  The (start) address itself is not necessarily
 * a hole.
 */
#define VM_MAP_FFCOUNT	4
#define VM_MAP_FFMASK	(VM_MAP_FFCOUNT - 1)

struct vm_map_freehint {
	vm_offset_t	start;
	vm_offset_t	length;
	vm_offset_t	align;
	int		unused01;
};
typedef struct vm_map_freehint vm_map_freehint_t;

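/*
 * Illustrative sketch with a hypothetical helper (the real scan lives in
 * vm_map.c): a findspace-style iteration may begin at a hinted address
 * rather than the map minimum when a recorded hint matches the request,
 * since the hint guarantees no compatible hole exists below it.  The
 * vm_map structure defined below embeds VM_MAP_FFCOUNT of these hints.
 *
 *	static vm_offset_t
 *	freehint_start(vm_map_t map, vm_size_t length, vm_offset_t align)
 *	{
 *		int i;
 *
 *		for (i = 0; i < VM_MAP_FFCOUNT; ++i) {
 *			if (map->freehint[i].length == length &&
 *			    map->freehint[i].align == align)
 *				return (map->freehint[i].start);
 *		}
 *		return (vm_map_min(map));
 *	}
 */
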
/*
 * A vm_map stores a red-black tree of map entries, indexed by address.
 *
 * NOTE: The vm_map structure can be hard-locked with the lockmgr lock
 *	 or soft-serialized with the token, or both.
 */
RB_HEAD(vm_map_rb_tree, vm_map_entry);

struct vm_map {
	struct lock lock;		/* Lock for map data */
	struct vm_map_rb_tree rb_root;	/* Organize map entries */
	vm_offset_t min_addr;		/* min address */
	vm_offset_t max_addr;		/* max address */
	int nentries;			/* Number of entries */
	unsigned int timestamp;		/* Version number */
	vm_size_t size;			/* virtual size */
	u_char system_map;		/* Am I a system map? */
	u_char freehint_newindex;
	u_char unused02;
	u_char unused03;
	vm_flags_t flags;		/* flags for this vm_map */
	vm_map_freehint_t freehint[VM_MAP_FFCOUNT];
	struct pmap *pmap;		/* Physical map */
	struct vm_map_ilock *ilock_base;/* interlocks */
	struct spinlock ilock_spin;	/* interlocks (spinlock for) */
	struct lwkt_token token;	/* Soft serializer */
	vm_offset_t pgout_offset;	/* for RLIMIT_RSS scans */
};

typedef struct vm_map *vm_map_t;

/*
 * vm_flags_t values
 */
#define MAP_WIREFUTURE		0x0001	/* wire all future pages */

/*
 * Shareable process virtual address space.
 *
 * Referenced pointers from vmresident, proc
 */
struct vmspace {
	struct vm_map vm_map;	/* VM address map */
	struct pmap vm_pmap;	/* private physical map */
	int vm_flags;
	caddr_t vm_shm;		/* SYS5 shared memory private data XXX */
/* we copy from vm_startcopy on fork */
#define vm_startcopy vm_rssize
	segsz_t vm_rssize;	/* current resident set size in pages */
	segsz_t vm_swrss;	/* resident set size before last swap */
	segsz_t vm_tsize;	/* text size (bytes) */
	segsz_t vm_dsize;	/* data size (bytes) */
	segsz_t vm_ssize;	/* stack size (bytes) */
	caddr_t vm_taddr;	/* user virtual address of text XXX */
	caddr_t vm_daddr;	/* user virtual address of data XXX */
	caddr_t vm_maxsaddr;	/* user VA at max stack growth */
	caddr_t vm_minsaddr;	/* user VA at max stack growth */
#define vm_endcopy vm_unused01
	int vm_unused01;
	int vm_unused02;
	int vm_pagesupply;
	u_int vm_holdcnt;	/* temporary hold count and exit sequencing */
	u_int vm_refcnt;	/* normal ref count */
};

#define VM_REF_DELETED		0x80000000U

#define VMSPACE_EXIT1		0x0001	/* partial exit */
#define VMSPACE_EXIT2		0x0002	/* full exit */

#define VMSPACE_HOLDEXIT	0x80000000

/*
 * Resident executable holding structure.  A user program can take a snapshot
 * of just its VM address space (typically done just after dynamic link
 * libraries have completed loading) and register it as a resident
 * executable associated with the program binary's vnode, which is also
 * locked into memory.  Future execs of the vnode will start with a copy
 * of the resident vmspace instead of running the binary from scratch,
 * avoiding both the kernel ELF loader *AND* all shared library mapping and
 * relocation code, and will call a different entry point (the stack pointer
 * is reset to the top of the stack) supplied when the vmspace was registered.
 */
struct vmresident {
	struct vnode	*vr_vnode;	/* associated vnode */
	TAILQ_ENTRY(vmresident) vr_link;/* linked list of res sts */
	struct vmspace	*vr_vmspace;	/* vmspace to fork */
	intptr_t	vr_entry_addr;	/* registered entry point */
	struct sysentvec *vr_sysent;	/* system call vects */
	int		vr_id;		/* registration id */
	int		vr_refs;	/* temporary refs */
};

#ifdef _KERNEL
/*
 * Macros:	vm_map_lock, etc.
 * Function:
 *	Perform locking on the data portion of a map.  Note that
 *	these macros mimic procedure calls returning void.  The
 *	semicolon is supplied by the user of these macros, not
 *	by the macros themselves.  The macros can safely be used
 *	as unbraced elements in a higher level statement.
 */

#define ASSERT_VM_MAP_LOCKED(map)	KKASSERT(lockowned(&(map)->lock))

#ifdef DIAGNOSTIC
/* #define MAP_LOCK_DIAGNOSTIC 1 */
#ifdef MAP_LOCK_DIAGNOSTIC
#define vm_map_lock(map) \
	do { \
		kprintf("locking map LK_EXCLUSIVE: %p\n", map); \
		if (lockmgr(&(map)->lock, LK_EXCLUSIVE) != 0) { \
			panic("vm_map_lock: failed to get lock"); \
		} \
		(map)->timestamp++; \
	} while (0)
#else
#define vm_map_lock(map) \
	do { \
		if (lockmgr(&(map)->lock, LK_EXCLUSIVE) != 0) { \
			panic("vm_map_lock: failed to get lock"); \
		} \
		(map)->timestamp++; \
	} while (0)
#endif
#else
#define vm_map_lock(map) \
	do { \
		lockmgr(&(map)->lock, LK_EXCLUSIVE); \
		(map)->timestamp++; \
	} while (0)
#endif /* DIAGNOSTIC */

#if defined(MAP_LOCK_DIAGNOSTIC)
#define vm_map_unlock(map) \
	do { \
		kprintf("locking map LK_RELEASE: %p\n", map); \
		lockmgr(&(map)->lock, LK_RELEASE); \
	} while (0)
#define vm_map_lock_read(map) \
	do { \
		kprintf("locking map LK_SHARED: %p\n", map); \
		lockmgr(&(map)->lock, LK_SHARED); \
	} while (0)
#define vm_map_unlock_read(map) \
	do { \
		kprintf("locking map LK_RELEASE: %p\n", map); \
		lockmgr(&(map)->lock, LK_RELEASE); \
	} while (0)
#else
#define vm_map_unlock(map) \
	lockmgr(&(map)->lock, LK_RELEASE)
#define vm_map_lock_read(map) \
	lockmgr(&(map)->lock, LK_SHARED)
#define vm_map_unlock_read(map) \
	lockmgr(&(map)->lock, LK_RELEASE)
#endif

#define vm_map_lock_read_try(map) \
	lockmgr(&(map)->lock, LK_SHARED | LK_NOWAIT)

static __inline__ int
vm_map_lock_read_to(vm_map_t map)
{
	int error;

#if defined(MAP_LOCK_DIAGNOSTIC)
	kprintf("locking map LK_SHARED: %p\n", map);
#endif
	error = lockmgr(&(map)->lock, LK_SHARED | LK_TIMELOCK);
	return error;
}

static __inline__ int
vm_map_lock_upgrade(vm_map_t map)
{
	int error;

#if defined(MAP_LOCK_DIAGNOSTIC)
	kprintf("locking map LK_EXCLUPGRADE: %p\n", map);
#endif
	error = lockmgr(&map->lock, LK_EXCLUPGRADE);
	if (error == 0)
		map->timestamp++;
	return error;
}

#if defined(MAP_LOCK_DIAGNOSTIC)
#define vm_map_lock_downgrade(map) \
	do { \
		kprintf("locking map LK_DOWNGRADE: %p\n", map); \
		lockmgr(&(map)->lock, LK_DOWNGRADE); \
	} while (0)
#else
#define vm_map_lock_downgrade(map) \
	lockmgr(&(map)->lock, LK_DOWNGRADE)
#endif

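/*
 * Usage sketch (illustrative): read-only traversals take the shared
 * lock, mutators take the exclusive lock.  vm_map_lock() also bumps
 * the map timestamp so interrupted lookups can detect changes.
 *
 *	vm_map_entry_t entry;
 *
 *	vm_map_lock_read(map);
 *	if (vm_map_lookup_entry(map, addr, &entry)) {
 *		...examine the entry covering addr...
 *	}
 *	vm_map_unlock_read(map);
 */
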
#endif /* _KERNEL */

/*
 * Functions implemented as macros
 */
#define vm_map_min(map)		((map)->min_addr)
#define vm_map_max(map)		((map)->max_addr)
#define vm_map_pmap(map)	((map)->pmap)

/*
 * Must not block
 */
static __inline struct pmap *
vmspace_pmap(struct vmspace *vmspace)
{
	return &vmspace->vm_pmap;
}

/*
 * Caller must hold the vmspace->vm_map.token
 */
static __inline long
vmspace_resident_count(struct vmspace *vmspace)
{
	return pmap_resident_count(vmspace_pmap(vmspace));
}

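/*
 * Usage sketch (an assumption): per the note above, serialize with the
 * map's LWKT token before sampling the count.
 *
 *	lwkt_gettoken(&vmspace->vm_map.token);
 *	count = vmspace_resident_count(vmspace);
 *	lwkt_reltoken(&vmspace->vm_map.token);
 */
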
/*
 * Number of kernel maps and entries to statically allocate, required
 * during boot to bootstrap the VM system.
 */
#define MAX_KMAP	10
#define MAX_MAPENT	(SMP_MAXCPU * 32 + 1024)

/*
 * Copy-on-write flags for vm_map operations
 */
#define MAP_UNUSED_01		0x0001
#define MAP_COPY_ON_WRITE	0x0002
#define MAP_NOFAULT		0x0004
#define MAP_PREFAULT		0x0008
#define MAP_PREFAULT_PARTIAL	0x0010
#define MAP_DISABLE_SYNCER	0x0020
#define MAP_IS_STACK		0x0040
#define MAP_IS_KSTACK		0x0080
#define MAP_DISABLE_COREDUMP	0x0100
#define MAP_PREFAULT_MADVISE	0x0200	/* from (user) madvise request */
#define MAP_PREFAULT_RELOCK	0x0200

/*
 * vm_fault option flags
 */
#define VM_FAULT_NORMAL		0x00	/* Nothing special */
#define VM_FAULT_CHANGE_WIRING	0x01	/* Change the wiring as appropriate */
#define VM_FAULT_USER_WIRE	0x02	/* Likewise, but for user purposes */
#define VM_FAULT_BURST		0x04	/* Burst fault can be done */
#define VM_FAULT_DIRTY		0x08	/* Dirty the page */
#define VM_FAULT_UNSWAP		0x10	/* Remove backing store from the page */
#define VM_FAULT_BURST_QUICK	0x20	/* Special case shared vm_object */
#define VM_FAULT_WIRE_MASK	(VM_FAULT_CHANGE_WIRING|VM_FAULT_USER_WIRE)
#define VM_FAULT_USERMODE	0x40

#ifdef _KERNEL

boolean_t vm_map_check_protection (vm_map_t, vm_offset_t, vm_offset_t,
		vm_prot_t, boolean_t);
struct pmap;
struct globaldata;
void vm_map_entry_allocate_object(vm_map_entry_t);
void vm_map_entry_reserve_cpu_init(struct globaldata *gd);
int vm_map_entry_reserve(int);
int vm_map_entry_kreserve(int);
void vm_map_entry_release(int);
void vm_map_entry_krelease(int);
int vm_map_delete (vm_map_t, vm_offset_t, vm_offset_t, int *);
int vm_map_find (vm_map_t, void *, void *,
		 vm_ooffset_t, vm_offset_t *, vm_size_t,
		 vm_size_t, boolean_t,
		 vm_maptype_t, vm_subsys_t id,
		 vm_prot_t, vm_prot_t, int);
int vm_map_findspace (vm_map_t, vm_offset_t, vm_size_t, vm_size_t,
		      int, vm_offset_t *);
vm_offset_t vm_map_hint(struct proc *, vm_offset_t, vm_prot_t);
int vm_map_inherit (vm_map_t, vm_offset_t, vm_offset_t, vm_inherit_t);
void vm_map_init (struct vm_map *, vm_offset_t, vm_offset_t, pmap_t);
int vm_map_insert (vm_map_t, int *,
		   void *, void *,
		   vm_ooffset_t, void *,
		   vm_offset_t, vm_offset_t,
		   vm_maptype_t, vm_subsys_t id,
		   vm_prot_t, vm_prot_t, int);
int vm_map_lookup (vm_map_t *, vm_offset_t, vm_prot_t,
		   vm_map_entry_t *, struct vm_map_backing **,
		   vm_pindex_t *, vm_pindex_t *, vm_prot_t *, int *);
void vm_map_lookup_done (vm_map_t, vm_map_entry_t, int);
boolean_t vm_map_lookup_entry (vm_map_t, vm_offset_t, vm_map_entry_t *);
int vm_map_kernel_wiring (vm_map_t, vm_offset_t, vm_offset_t, int);
int vm_map_user_wiring (vm_map_t, vm_offset_t, vm_offset_t, boolean_t);
int vm_map_clean (vm_map_t, vm_offset_t, vm_offset_t, boolean_t, boolean_t);
int vm_map_protect (vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t);
int vm_map_remove (vm_map_t, vm_offset_t, vm_offset_t);
void vm_map_startup (void);
int vm_map_submap (vm_map_t, vm_offset_t, vm_offset_t, vm_map_t);
int vm_map_madvise (vm_map_t, vm_offset_t, vm_offset_t, int, off_t);
void vm_map_simplify_entry (vm_map_t, vm_map_entry_t, int *);
void vm_init2 (void);
int vm_uiomove (vm_map_t, vm_object_t, off_t, int, vm_offset_t, int *);
int vm_map_stack (vm_map_t, vm_offset_t *, vm_size_t, int,
		  vm_prot_t, vm_prot_t, int);
int vm_map_growstack (vm_map_t map, vm_offset_t addr);
vm_offset_t vmspace_swap_count (struct vmspace *vmspace);
vm_offset_t vmspace_anonymous_count (struct vmspace *vmspace);
void vm_map_set_wired_quick(vm_map_t map, vm_offset_t addr, vm_size_t size,
		  int *);
void vm_map_transition_wait(vm_map_t map, int relock);

void vm_map_interlock(vm_map_t map, struct vm_map_ilock *ilock,
		  vm_offset_t ran_beg, vm_offset_t ran_end);
void vm_map_deinterlock(vm_map_t map, struct vm_map_ilock *ilock);

#if defined(__x86_64__) && defined(_KERNEL_VIRTUAL)
int vkernel_module_memory_alloc(vm_offset_t *, size_t);
void vkernel_module_memory_free(vm_offset_t, size_t);
#endif

#endif /* _KERNEL */
#endif /* _VM_VM_MAP_H_ */