/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.h	8.9 (Berkeley) 5/17/95
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *	Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *	School of Computer Science
 *	Carnegie Mellon University
 *	Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_map.h,v 1.54.2.5 2003/01/13 22:51:17 dillon Exp $
 * $DragonFly: src/sys/vm/vm_map.h,v 1.14 2004/05/24 07:25:36 hmp Exp $
 */

#ifndef	_VM_MAP_
#define	_VM_MAP_
/*
 *	Virtual memory map module definitions.
 *
 *	vm_map_t		the high-level address map data structure.
 *	vm_map_entry_t		an entry in an address map.
 */

typedef u_int vm_eflags_t;
/*
 *	Objects which live in maps may be either VM objects, or
 *	another map (called a "sharing map") which denotes read-write
 *	sharing with other maps.
 */
union vm_map_object {
	struct vm_object *vm_object;	/* the VM object */
	struct vm_map *sub_map;		/* belongs to another map */
};
/*
 *	Address map entries consist of start and end addresses,
 *	a VM object (or sharing map) and offset into that object,
 *	and user-exported inheritance and protection information.
 *	Also included is control information for virtual copy operations.
 */
struct vm_map_entry {
	struct vm_map_entry *prev;	/* previous entry */
	struct vm_map_entry *next;	/* next entry */
	vm_offset_t start;		/* start address */
	vm_offset_t end;		/* end address */
	vm_offset_t avail_ssize;	/* amt can grow if this is a stack */
	union vm_map_object object;	/* object I point to */
	vm_ooffset_t offset;		/* offset into object */
	vm_eflags_t eflags;		/* map entry flags */
	/* Only in task maps: */
	vm_prot_t protection;		/* protection code */
	vm_prot_t max_protection;	/* maximum protection */
	vm_inherit_t inheritance;	/* inheritance */
	int wired_count;		/* can be paged if wired_count == 0 */
	vm_pindex_t lastr;		/* last read */
};
#define MAP_ENTRY_NOSYNC		0x0001
#define MAP_ENTRY_IS_SUB_MAP		0x0002
#define MAP_ENTRY_COW			0x0004
#define MAP_ENTRY_NEEDS_COPY		0x0008
#define MAP_ENTRY_NOFAULT		0x0010
#define MAP_ENTRY_USER_WIRED		0x0020

#define MAP_ENTRY_BEHAV_NORMAL		0x0000	/* default behavior */
#define MAP_ENTRY_BEHAV_SEQUENTIAL	0x0040	/* expect sequential access */
#define MAP_ENTRY_BEHAV_RANDOM		0x0080	/* expect random access */
#define MAP_ENTRY_BEHAV_RESERVED	0x00C0	/* future use */

#define MAP_ENTRY_BEHAV_MASK		0x00C0

#define MAP_ENTRY_IN_TRANSITION		0x0100	/* entry being changed */
#define MAP_ENTRY_NEEDS_WAKEUP		0x0200	/* waiters in transition */
#define MAP_ENTRY_NOCOREDUMP		0x0400	/* don't include in a core */
/*
 * Flags for vm_map_[un]clip_range().
 */
#define MAP_CLIP_NO_HOLES		0x0001

/*
 * This reserve count for vm_map_entry_reserve() should cover all nominal
 * single-insertion operations, including any necessary clipping.
 */
#define MAP_RESERVE_COUNT	4
#define MAP_RESERVE_SLOP	32
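
/*
 * Reservation usage sketch (illustrative, not a declaration): callers
 * reserve entries up front while they may still block, run the map
 * operation under the map lock with the count passed along, and release
 * whatever was not consumed afterwards.
 *
 *	int count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
 *
 *	vm_map_lock(map);
 *	... insert/delete/clip operations that take &count ...
 *	vm_map_unlock(map);
 *	vm_map_entry_release(count);
 */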
static __inline u_char
vm_map_entry_behavior(struct vm_map_entry *entry)
{
	return entry->eflags & MAP_ENTRY_BEHAV_MASK;
}

static __inline void
vm_map_entry_set_behavior(struct vm_map_entry *entry, u_char behavior)
{
	entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
		(behavior & MAP_ENTRY_BEHAV_MASK);
}
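
/*
 * Behavior usage sketch (illustrative): madvise-style advice is recorded
 * per map entry via these helpers; 'entry' is assumed to come from a
 * lookup performed while holding the map lock.
 *
 *	vm_map_entry_set_behavior(entry, MAP_ENTRY_BEHAV_SEQUENTIAL);
 *	if (vm_map_entry_behavior(entry) == MAP_ENTRY_BEHAV_RANDOM)
 *		... skip read-ahead heuristics ...
 */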
/*
 *	Maps are doubly-linked lists of map entries, kept sorted
 *	by address.  A single hint is provided to start
 *	searches again from the last successful search,
 *	insertion, or removal.
 *
 *	Note: the lock structure cannot be the first element of vm_map
 *	because this can result in a lockup between two or more system
 *	processes trying to kmem_alloc_wait(): kmem_alloc_wait() and
 *	kmem_free_wakeup() tsleep on and wake up 'map', while the
 *	underlying lockmgr lock also sleeps and wakes up on 'map'.
 *	The lockup occurs when the map fills up; the 'exec' map is one
 *	example.
 */
struct vm_map {
	struct vm_map_entry header;	/* List of entries */
	struct lock lock;		/* Lock for map data */
	int nentries;			/* Number of entries */
	vm_size_t size;			/* virtual size */
	u_char system_map;		/* Am I a system map? */
	u_char infork;			/* Am I in fork processing? */
	vm_map_entry_t hint;		/* hint for quick lookups */
	unsigned int timestamp;		/* Version number */
	vm_map_entry_t first_free;	/* First free space hint */
	struct pmap *pmap;		/* Physical map */
#define	min_offset	header.start
#define	max_offset	header.end
};
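
/*
 * Traversal sketch (illustrative): 'header' acts as a sentinel, so a full
 * walk runs from header.next back around to &map->header.  The caller is
 * assumed to hold at least a read lock (see the locking macros below).
 *
 *	vm_map_entry_t entry;
 *
 *	vm_map_lock_read(map);
 *	for (entry = map->header.next; entry != &map->header;
 *	     entry = entry->next) {
 *		... examine entry->start, entry->end, entry->protection ...
 *	}
 *	vm_map_unlock_read(map);
 */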
/*
 * Registered upcall.
 */
struct vmupcall {
	struct vmupcall *vu_next;
	void	*vu_func;	/* user upcall function */
	void	*vu_data;	/* user data */
	void	*vu_ctx;	/* user context function */
	struct proc *vu_proc;	/* process that registered upcall */
	int	vu_id;		/* upcall identifier */
	int	vu_pending;	/* upcall request pending */
};
/*
 *	Shareable process virtual address space.
 *	May eventually be merged with vm_map.
 *	Several fields are temporary (text, data stuff).
 */
struct vmspace {
	struct vm_map vm_map;	/* VM address map */
	struct pmap vm_pmap;	/* private physical map */
	int	vm_refcnt;	/* number of references */
	caddr_t	vm_shm;		/* SYS5 shared memory private data XXX */
/* we copy from vm_startcopy to the end of the structure on fork */
#define vm_startcopy vm_rssize
	segsz_t	vm_rssize;	/* current resident set size in pages */
	segsz_t	vm_swrss;	/* resident set size before last swap */
	segsz_t	vm_tsize;	/* text size (pages) XXX */
	segsz_t	vm_dsize;	/* data size (pages) XXX */
	segsz_t	vm_ssize;	/* stack size (pages) */
	caddr_t	vm_taddr;	/* user virtual address of text XXX */
	caddr_t	vm_daddr;	/* user virtual address of data XXX */
	caddr_t	vm_maxsaddr;	/* user VA at max stack growth */
	caddr_t	vm_minsaddr;	/* user VA at top of stack */
#define vm_endcopy vm_exitingcnt
	int	vm_exitingcnt;	/* several processes zombied in exit1 */
	int	vm_upccount;	/* number of registered upcalls */
	struct vmupcall *vm_upcalls;	/* registered upcalls */
};
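
/*
 * The vm_startcopy/vm_endcopy markers bound the region that is block-copied
 * when a new vmspace is cloned.  A sketch of that copy (shown only to
 * illustrate what the markers mean; vm1/vm2 are hypothetical pointers):
 *
 *	bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
 *	    (caddr_t)&vm1->vm_endcopy - (caddr_t)&vm1->vm_startcopy);
 */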
/*
 * Resident executable holding structure.  A user program can take a snapshot
 * of just its VM address space (typically done just after dynamic link
 * libraries have completed loading) and register it as a resident
 * executable associated with the program binary's vnode, which is also
 * locked into memory.  Future execs of the vnode will start with a copy
 * of the resident vmspace instead of running the binary from scratch,
 * avoiding both the kernel ELF loader *AND* all shared library mapping and
 * relocation code, and will call a different entry point (the stack pointer
 * is reset to the top of the stack) supplied when the vmspace was registered.
 */
struct vmresident {
	struct vnode	*vr_vnode;		/* associated vnode */
	TAILQ_ENTRY(vmresident) vr_link;	/* linked list of resident execs */
	struct vmspace	*vr_vmspace;		/* vmspace to fork */
	intptr_t	vr_entry_addr;		/* registered entry point */
	struct sysentvec *vr_sysent;		/* system call vects */
	int		vr_id;			/* registration id */
};
/*
 *	Macros:		vm_map_lock, etc.
 *
 *	Perform locking on the data portion of a map.  Note that
 *	these macros mimic procedure calls returning void.  The
 *	semicolon is supplied by the user of these macros, not
 *	by the macros themselves.  The macros can safely be used
 *	as unbraced elements in a higher level statement.
 */
#ifdef DIAGNOSTIC
/* #define MAP_LOCK_DIAGNOSTIC 1 */
#ifdef MAP_LOCK_DIAGNOSTIC
#define	vm_map_lock(map) \
	do { \
		printf ("locking map LK_EXCLUSIVE: 0x%x\n", map); \
		if (lockmgr(&(map)->lock, LK_EXCLUSIVE, NULL, curthread) != 0) { \
			panic("vm_map_lock: failed to get lock"); \
		} \
		(map)->timestamp++; \
	} while (0)
#else
#define	vm_map_lock(map) \
	do { \
		if (lockmgr(&(map)->lock, LK_EXCLUSIVE, NULL, curthread) != 0) { \
			panic("vm_map_lock: failed to get lock"); \
		} \
		(map)->timestamp++; \
	} while (0)
#endif
#else
#define	vm_map_lock(map) \
	do { \
		lockmgr(&(map)->lock, LK_EXCLUSIVE, NULL, curthread); \
		(map)->timestamp++; \
	} while (0)
#endif /* DIAGNOSTIC */
#if defined(MAP_LOCK_DIAGNOSTIC)
#define	vm_map_unlock(map) \
	do { \
		printf ("locking map LK_RELEASE: 0x%x\n", map); \
		lockmgr(&(map)->lock, LK_RELEASE, NULL, curthread); \
	} while (0)
#define	vm_map_lock_read(map) \
	do { \
		printf ("locking map LK_SHARED: 0x%x\n", map); \
		lockmgr(&(map)->lock, LK_SHARED, NULL, curthread); \
	} while (0)
#define	vm_map_unlock_read(map) \
	do { \
		printf ("locking map LK_RELEASE: 0x%x\n", map); \
		lockmgr(&(map)->lock, LK_RELEASE, NULL, curthread); \
	} while (0)
#else
#define	vm_map_unlock(map) \
	lockmgr(&(map)->lock, LK_RELEASE, NULL, curthread)
#define	vm_map_lock_read(map) \
	lockmgr(&(map)->lock, LK_SHARED, NULL, curthread)
#define	vm_map_unlock_read(map) \
	lockmgr(&(map)->lock, LK_RELEASE, NULL, curthread)
#endif
static __inline__ int
_vm_map_lock_upgrade(vm_map_t map, struct thread *td) {
	int error;
#if defined(MAP_LOCK_DIAGNOSTIC)
	printf("locking map LK_EXCLUPGRADE: 0x%x\n", map);
#endif
	error = lockmgr(&map->lock, LK_EXCLUPGRADE, NULL, td);
	if (error == 0)
		map->timestamp++;
	return error;
}
#define vm_map_lock_upgrade(map) _vm_map_lock_upgrade(map, curthread)
#if defined(MAP_LOCK_DIAGNOSTIC)
#define vm_map_lock_downgrade(map) \
	do { \
		printf ("locking map LK_DOWNGRADE: 0x%x\n", map); \
		lockmgr(&(map)->lock, LK_DOWNGRADE, NULL, curthread); \
	} while (0)
#else
#define vm_map_lock_downgrade(map) \
	lockmgr(&(map)->lock, LK_DOWNGRADE, NULL, curthread)
#endif
#define vm_map_set_recursive(map) \
	do { \
		struct lwkt_tokref ilock; \
		lwkt_gettoken(&ilock, &(map)->lock.lk_interlock); \
		(map)->lock.lk_flags |= LK_CANRECURSE; \
		lwkt_reltoken(&ilock); \
	} while (0)

#define vm_map_clear_recursive(map) \
	do { \
		struct lwkt_tokref ilock; \
		lwkt_gettoken(&ilock, &(map)->lock.lk_interlock); \
		(map)->lock.lk_flags &= ~LK_CANRECURSE; \
		lwkt_reltoken(&ilock); \
	} while (0)
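
/*
 * Locking usage sketch (illustrative): a typical lookup-then-modify path
 * takes the lock shared and upgrades only when a change is needed.  A
 * failed upgrade loses the lock, so the caller must re-lock and
 * re-validate, e.g. against map->timestamp (caller-side convention
 * assumed here).
 *
 *	vm_map_lock_read(map);
 *	... look up the entry ...
 *	if (vm_map_lock_upgrade(map)) {
 *		vm_map_lock(map);
 *		... re-validate the lookup ...
 *	}
 *	... modify the map ...
 *	vm_map_unlock(map);
 */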
/*
 *	Functions implemented as macros
 */
#define	vm_map_min(map)		((map)->min_offset)
#define	vm_map_max(map)		((map)->max_offset)
#define	vm_map_pmap(map)	((map)->pmap)
static __inline struct pmap *
vmspace_pmap(struct vmspace *vmspace)
{
	return &vmspace->vm_pmap;
}
static __inline long
vmspace_resident_count(struct vmspace *vmspace)
{
	return pmap_resident_count(vmspace_pmap(vmspace));
}
/*
 * Number of kernel maps and entries to statically allocate, required
 * during boot to bootstrap the VM system.
 */
#define	MAX_MAPENT	256
/*
 * Copy-on-write flags for vm_map operations
 */
#define MAP_UNUSED_01		0x0001
#define MAP_COPY_ON_WRITE	0x0002
#define MAP_NOFAULT		0x0004
#define MAP_PREFAULT		0x0008
#define MAP_PREFAULT_PARTIAL	0x0010
#define MAP_DISABLE_SYNCER	0x0020
#define MAP_DISABLE_COREDUMP	0x0100
#define MAP_PREFAULT_MADVISE	0x0200	/* from (user) madvise request */
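
/*
 * Illustrative sketch: a private copy-on-write mapping of an object would
 * pass a combination of these flags as the last argument of vm_map_insert()
 * (declared below); addresses, protections and 'count' are placeholders.
 *
 *	vm_map_insert(map, &count, object, offset, start, end,
 *	    VM_PROT_ALL, VM_PROT_ALL,
 *	    MAP_COPY_ON_WRITE | MAP_PREFAULT_PARTIAL);
 */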
/*
 * vm_fault option flags
 */
#define VM_FAULT_NORMAL		0	/* Nothing special */
#define VM_FAULT_CHANGE_WIRING	1	/* Change the wiring as appropriate */
#define VM_FAULT_USER_WIRE	2	/* Likewise, but for user purposes */
#define VM_FAULT_WIRE_MASK	(VM_FAULT_CHANGE_WIRING|VM_FAULT_USER_WIRE)
#define VM_FAULT_HOLD		4	/* Hold the page */
#define VM_FAULT_DIRTY		8	/* Dirty the page */
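
/*
 * Illustrative sketch (vm_fault() itself is declared in vm_extern.h; the
 * signature below is assumed from the same era): wiring a user page via
 * the fault path would look roughly like:
 *
 *	rv = vm_fault(map, trunc_page(va), VM_PROT_READ | VM_PROT_WRITE,
 *	    VM_FAULT_USER_WIRE);
 */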
#ifdef _KERNEL
boolean_t vm_map_check_protection (vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t);
void vm_map_entry_reserve_cpu_init(struct globaldata *gd);
int vm_map_entry_reserve(int);
int vm_map_entry_kreserve(int);
void vm_map_entry_release(int);
void vm_map_entry_krelease(int);
vm_map_t vm_map_create (struct pmap *, vm_offset_t, vm_offset_t);
int vm_map_delete (vm_map_t, vm_offset_t, vm_offset_t, int *);
int vm_map_find (vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t *, vm_size_t, boolean_t, vm_prot_t, vm_prot_t, int);
int vm_map_findspace (vm_map_t, vm_offset_t, vm_size_t, vm_offset_t, vm_offset_t *);
int vm_map_inherit (vm_map_t, vm_offset_t, vm_offset_t, vm_inherit_t);
void vm_map_init (struct vm_map *, vm_offset_t, vm_offset_t);
int vm_map_insert (vm_map_t, int *, vm_object_t, vm_ooffset_t, vm_offset_t, vm_offset_t, vm_prot_t, vm_prot_t, int);
int vm_map_lookup (vm_map_t *, vm_offset_t, vm_prot_t, vm_map_entry_t *, vm_object_t *,
    vm_pindex_t *, vm_prot_t *, boolean_t *);
void vm_map_lookup_done (vm_map_t, vm_map_entry_t, int);
boolean_t vm_map_lookup_entry (vm_map_t, vm_offset_t, vm_map_entry_t *);
int vm_map_wire (vm_map_t, vm_offset_t, vm_offset_t, int);
int vm_map_unwire (vm_map_t, vm_offset_t, vm_offset_t, boolean_t);
int vm_map_clean (vm_map_t, vm_offset_t, vm_offset_t, boolean_t, boolean_t);
int vm_map_protect (vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t);
int vm_map_remove (vm_map_t, vm_offset_t, vm_offset_t);
void vm_map_startup (void);
int vm_map_submap (vm_map_t, vm_offset_t, vm_offset_t, vm_map_t);
int vm_map_madvise (vm_map_t, vm_offset_t, vm_offset_t, int);
void vm_map_simplify_entry (vm_map_t, vm_map_entry_t, int *);
void vm_init2 (void);
int vm_uiomove (vm_map_t, vm_object_t, off_t, int, vm_offset_t, int *);
void vm_freeze_copyopts (vm_object_t, vm_pindex_t, vm_pindex_t);
int vm_map_stack (vm_map_t, vm_offset_t, vm_size_t, vm_prot_t, vm_prot_t, int);
int vm_map_growstack (struct proc *p, vm_offset_t addr);
int vmspace_swap_count (struct vmspace *vmspace);
void vm_map_set_wired_quick(vm_map_t map, vm_offset_t addr, vm_size_t size, int *);
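
/*
 * Putting the pieces together (illustrative only, error handling omitted):
 * mapping an object at a kernel-chosen address with vm_map_find(), which is
 * assumed here to lock the map and reserve map entries internally.
 *
 *	vm_offset_t addr = vm_map_min(map);
 *	int rv;
 *
 *	rv = vm_map_find(map, object, offset, &addr, size,
 *	    TRUE,		... find free space ...
 *	    VM_PROT_ALL, VM_PROT_ALL, MAP_COPY_ON_WRITE);
 *	if (rv != KERN_SUCCESS)
 *		... handle failure ...
 */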
#endif				/* _KERNEL */
#endif				/* _VM_MAP_ */