/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.h	8.9 (Berkeley) 5/17/95
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_map.h,v 1.54.2.5 2003/01/13 22:51:17 dillon Exp $
 * $DragonFly: src/sys/vm/vm_map.h,v 1.3 2003/06/25 03:56:13 dillon Exp $
 */

/*
 *	Virtual memory map module definitions.
 */

#ifndef	_VM_MAP_
#define	_VM_MAP_

/*
 *	Types defined:
 *
 *	vm_map_t		the high-level address map data structure.
 *	vm_map_entry_t		an entry in an address map.
 */

typedef u_int vm_eflags_t;

/*
 *	Objects which live in maps may be either VM objects, or
 *	another map (called a "sharing map") which denotes read-write
 *	sharing with other maps.
 */

union vm_map_object {
        struct vm_object *vm_object;    /* the backing VM object */
        struct vm_map *sub_map;         /* belongs to another map */
};

/*
 *	Address map entries consist of start and end addresses,
 *	a VM object (or sharing map) and offset into that object,
 *	and user-exported inheritance and protection information.
 *	Also included is control information for virtual copy operations.
 */
struct vm_map_entry {
        struct vm_map_entry *prev;      /* previous entry */
        struct vm_map_entry *next;      /* next entry */
        vm_offset_t start;              /* start address */
        vm_offset_t end;                /* end address */
        vm_offset_t avail_ssize;        /* amt can grow if this is a stack */
        union vm_map_object object;     /* object I point to */
        vm_ooffset_t offset;            /* offset into object */
        vm_eflags_t eflags;             /* map entry flags */
        /* Only in task maps: */
        vm_prot_t protection;           /* protection code */
        vm_prot_t max_protection;       /* maximum protection */
        vm_inherit_t inheritance;       /* inheritance */
        int wired_count;                /* can be paged if = 0 */
        vm_pindex_t lastr;              /* last read */
};

#define MAP_ENTRY_NOSYNC		0x0001
#define MAP_ENTRY_IS_SUB_MAP		0x0002
#define MAP_ENTRY_COW			0x0004
#define MAP_ENTRY_NEEDS_COPY		0x0008
#define MAP_ENTRY_NOFAULT		0x0010
#define MAP_ENTRY_USER_WIRED		0x0020

#define MAP_ENTRY_BEHAV_NORMAL		0x0000	/* default behavior */
#define MAP_ENTRY_BEHAV_SEQUENTIAL	0x0040	/* expect sequential access */
#define MAP_ENTRY_BEHAV_RANDOM		0x0080	/* expect random access */
#define MAP_ENTRY_BEHAV_RESERVED	0x00C0	/* future use */

#define MAP_ENTRY_BEHAV_MASK		0x00C0

#define MAP_ENTRY_IN_TRANSITION		0x0100	/* entry being changed */
#define MAP_ENTRY_NEEDS_WAKEUP		0x0200	/* waiters on in-transition entry */
#define MAP_ENTRY_NOCOREDUMP		0x0400	/* don't include in a core */

/*
 * flags for vm_map_[un]clip_range()
 */
#define MAP_CLIP_NO_HOLES		0x0001

static __inline u_char
vm_map_entry_behavior(struct vm_map_entry *entry)
{
        return entry->eflags & MAP_ENTRY_BEHAV_MASK;
}

static __inline void
vm_map_entry_set_behavior(struct vm_map_entry *entry, u_char behavior)
{
        entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
                (behavior & MAP_ENTRY_BEHAV_MASK);
}
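
/*
 * Illustrative sketch (not compiled): vm_map_madvise() (declared below) is
 * the real entry point for access-pattern advice, but conceptually
 * MADV_NORMAL/MADV_SEQUENTIAL/MADV_RANDOM from <sys/mman.h> reduce to
 * vm_map_entry_set_behavior() on each affected entry.  The helper name and
 * the exact mapping shown here are assumptions for illustration; see
 * vm_map.c for the authoritative translation.
 */
#if 0
static __inline void
vm_map_entry_apply_advice(struct vm_map_entry *entry, int advice)
{
        switch (advice) {
        case MADV_SEQUENTIAL:
                vm_map_entry_set_behavior(entry, MAP_ENTRY_BEHAV_SEQUENTIAL);
                break;
        case MADV_RANDOM:
                vm_map_entry_set_behavior(entry, MAP_ENTRY_BEHAV_RANDOM);
                break;
        default:
                vm_map_entry_set_behavior(entry, MAP_ENTRY_BEHAV_NORMAL);
                break;
        }
}
#endif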

/*
 *	Maps are doubly-linked lists of map entries, kept sorted
 *	by address.  A single hint is provided to start
 *	searches again from the last successful search,
 *	insertion, or removal.
 *
 * Note: the lock structure cannot be the first element of vm_map
 * because that would make &map->lock the same address as the map itself.
 * kmem_alloc_wait() and kmem_free_wakeup() tsleep/wakeup on the 'map'
 * address while the underlying lockmgr lock also sleeps and wakes up on
 * the same address, and the two interfere in a running lockup once a map
 * (the 'exec' map, for example) fills up.
 */
struct vm_map {
        struct vm_map_entry header;     /* List of entries */
        struct lock lock;               /* Lock for map data */
        int nentries;                   /* Number of entries */
        vm_size_t size;                 /* virtual size */
        u_char system_map;              /* Am I a system map? */
        u_char infork;                  /* Am I in fork processing? */
        vm_map_entry_t hint;            /* hint for quick lookups */
        unsigned int timestamp;         /* Version number */
        vm_map_entry_t first_free;      /* First free space hint */
        struct pmap *pmap;              /* Physical map */
#define	min_offset	header.start
#define	max_offset	header.end
};
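
/*
 * Illustrative sketch (not compiled): 'header' is a sentinel entry, so the
 * entry list is effectively circular.  header.next is the lowest-addressed
 * entry and a walk stops when the sentinel is reached again.  The caller is
 * assumed to hold at least a read lock on the map (see the locking macros
 * below); the function name is hypothetical.
 */
#if 0
static int
example_count_wired_entries(vm_map_t map)
{
        vm_map_entry_t entry;
        int wired = 0;

        for (entry = map->header.next; entry != &map->header;
             entry = entry->next) {
                if (entry->wired_count > 0)
                        ++wired;
        }
        return (wired);
}
#endif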

/*
 * Shareable process virtual address space.
 * May eventually be merged with vm_map.
 * Several fields are temporary (text, data stuff).
 */
struct vmspace {
        struct vm_map vm_map;   /* VM address map */
        struct pmap vm_pmap;    /* private physical map */
        int vm_refcnt;          /* number of references */
        caddr_t vm_shm;         /* SYS5 shared memory private data XXX */
/* we copy from vm_startcopy to the end of the structure on fork */
#define vm_startcopy vm_rssize
        segsz_t vm_rssize;      /* current resident set size in pages */
        segsz_t vm_swrss;       /* resident set size before last swap */
        segsz_t vm_tsize;       /* text size (pages) XXX */
        segsz_t vm_dsize;       /* data size (pages) XXX */
        segsz_t vm_ssize;       /* stack size (pages) */
        caddr_t vm_taddr;       /* user virtual address of text XXX */
        caddr_t vm_daddr;       /* user virtual address of data XXX */
        caddr_t vm_maxsaddr;    /* user VA at max stack growth */
        caddr_t vm_minsaddr;    /* user VA at max stack growth */
        int vm_exitingcnt;      /* several processes zombied in exit1 */
};
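
/*
 * Illustrative sketch (not compiled): vm_tsize/vm_dsize/vm_ssize are kept
 * in pages, so a byte figure is obtained with the traditional ctob() macro
 * (assumed available from <machine/param.h>).  This is only the
 * text+data+stack accounting view; the authoritative total of mapped
 * virtual memory is vm_map.size.  The function name is hypothetical.
 */
#if 0
static __inline vm_size_t
example_vmspace_segment_bytes(struct vmspace *vm)
{
        return (ctob(vm->vm_tsize + vm->vm_dsize + vm->vm_ssize));
}
#endif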

/*
 *	Macros:		vm_map_lock, etc.
 *	Function:
 *		Perform locking on the data portion of a map.  Note that
 *		these macros mimic procedure calls returning void.  The
 *		semicolon is supplied by the user of these macros, not
 *		by the macros themselves.  The macros can safely be used
 *		as unbraced elements in a higher level statement.
 */

#define	vm_map_lock_drain_interlock(map) \
	do { \
		lockmgr(&(map)->lock, LK_DRAIN|LK_INTERLOCK, \
			&(map)->ref_lock, curthread); \
		(map)->timestamp++; \
	} while(0)

#ifdef DIAGNOSTIC
/* #define MAP_LOCK_DIAGNOSTIC 1 */
#ifdef MAP_LOCK_DIAGNOSTIC
#define	vm_map_lock(map) \
	do { \
		printf ("locking map LK_EXCLUSIVE: 0x%x\n", map); \
		if (lockmgr(&(map)->lock, LK_EXCLUSIVE, (void *)0, curthread) != 0) { \
			panic("vm_map_lock: failed to get lock"); \
		} \
		(map)->timestamp++; \
	} while(0)
#else
#define	vm_map_lock(map) \
	do { \
		if (lockmgr(&(map)->lock, LK_EXCLUSIVE, (void *)0, curthread) != 0) { \
			panic("vm_map_lock: failed to get lock"); \
		} \
		(map)->timestamp++; \
	} while(0)
#endif
#else
#define	vm_map_lock(map) \
	do { \
		lockmgr(&(map)->lock, LK_EXCLUSIVE, (void *)0, curthread); \
		(map)->timestamp++; \
	} while(0)
#endif /* DIAGNOSTIC */

#if defined(MAP_LOCK_DIAGNOSTIC)
#define	vm_map_unlock(map) \
	do { \
		printf ("locking map LK_RELEASE: 0x%x\n", map); \
		lockmgr(&(map)->lock, LK_RELEASE, (void *)0, curthread); \
	} while (0)
#define	vm_map_lock_read(map) \
	do { \
		printf ("locking map LK_SHARED: 0x%x\n", map); \
		lockmgr(&(map)->lock, LK_SHARED, (void *)0, curthread); \
	} while (0)
#define	vm_map_unlock_read(map) \
	do { \
		printf ("locking map LK_RELEASE: 0x%x\n", map); \
		lockmgr(&(map)->lock, LK_RELEASE, (void *)0, curthread); \
	} while (0)
#else
#define	vm_map_unlock(map) \
	lockmgr(&(map)->lock, LK_RELEASE, (void *)0, curthread)
#define	vm_map_lock_read(map) \
	lockmgr(&(map)->lock, LK_SHARED, (void *)0, curthread)
#define	vm_map_unlock_read(map) \
	lockmgr(&(map)->lock, LK_RELEASE, (void *)0, curthread)
#endif

static __inline__ int
_vm_map_lock_upgrade(vm_map_t map, struct thread *td) {
	int error;
#if defined(MAP_LOCK_DIAGNOSTIC)
	printf("locking map LK_EXCLUPGRADE: 0x%x\n", map);
#endif
	error = lockmgr(&map->lock, LK_EXCLUPGRADE, (void *)0, td);
	if (error == 0)
		map->timestamp++;
	return error;
}

#define	vm_map_lock_upgrade(map) _vm_map_lock_upgrade(map, curthread)

#if defined(MAP_LOCK_DIAGNOSTIC)
#define	vm_map_lock_downgrade(map) \
	do { \
		printf ("locking map LK_DOWNGRADE: 0x%x\n", map); \
		lockmgr(&(map)->lock, LK_DOWNGRADE, (void *)0, curthread); \
	} while (0)
#else
#define	vm_map_lock_downgrade(map) \
	lockmgr(&(map)->lock, LK_DOWNGRADE, (void *)0, curthread)
#endif

#define	vm_map_set_recursive(map) \
	do { \
		simple_lock(&(map)->lock.lk_interlock); \
		(map)->lock.lk_flags |= LK_CANRECURSE; \
		simple_unlock(&(map)->lock.lk_interlock); \
	} while(0)
#define	vm_map_clear_recursive(map) \
	do { \
		simple_lock(&(map)->lock.lk_interlock); \
		(map)->lock.lk_flags &= ~LK_CANRECURSE; \
		simple_unlock(&(map)->lock.lk_interlock); \
	} while(0)
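
/*
 * Illustrative sketch (not compiled): the usual pattern is to take the map
 * lock, perform lookups or modifications, and release it.  A reader that
 * discovers it must modify the map can try vm_map_lock_upgrade(); a nonzero
 * return means the upgrade failed and the shared lock was lost, so the
 * operation is simply retried from scratch.  The function name is
 * hypothetical; vm_map_lookup_entry() is declared below.
 */
#if 0
static void
example_modify_entry_at(vm_map_t map, vm_offset_t addr)
{
        vm_map_entry_t entry;

retry:
        vm_map_lock_read(map);
        if (!vm_map_lookup_entry(map, addr, &entry)) {
                vm_map_unlock_read(map);
                return;
        }
        if (vm_map_lock_upgrade(map)) {
                /* upgrade failed; the shared lock was dropped, start over */
                goto retry;
        }
        /* ... the map is now held exclusively; 'entry' may be modified ... */
        vm_map_unlock(map);
}
#endif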

/*
 *	Functions implemented as macros
 */
#define	vm_map_min(map)		((map)->min_offset)
#define	vm_map_max(map)		((map)->max_offset)
#define	vm_map_pmap(map)	((map)->pmap)

static __inline struct pmap *
vmspace_pmap(struct vmspace *vmspace)
{
        return &vmspace->vm_pmap;
}

static __inline long
vmspace_resident_count(struct vmspace *vmspace)
{
        return pmap_resident_count(vmspace_pmap(vmspace));
}
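
/*
 * Illustrative sketch (not compiled): the resident count is in pages, so a
 * byte figure is obtained with ptoa() (assumed available from
 * <machine/param.h>).  The function name is hypothetical.
 */
#if 0
static __inline vm_size_t
example_vmspace_resident_bytes(struct vmspace *vmspace)
{
        return (ptoa(vmspace_resident_count(vmspace)));
}
#endif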

/* XXX: number of kernel maps and entries to statically allocate */
#define MAX_KMAP	10
#define	MAX_KMAPENT	128
#define	MAX_MAPENT	128

/*
 * Copy-on-write flags for vm_map operations
 */
#define	MAP_UNUSED_01		0x0001
#define	MAP_COPY_ON_WRITE	0x0002
#define	MAP_NOFAULT		0x0004
#define	MAP_PREFAULT		0x0008
#define	MAP_PREFAULT_PARTIAL	0x0010
#define	MAP_DISABLE_SYNCER	0x0020
#define	MAP_DISABLE_COREDUMP	0x0100
#define	MAP_PREFAULT_MADVISE	0x0200	/* from (user) madvise request */
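
/*
 * Illustrative sketch (not compiled): these flags are passed in the 'cow'
 * argument of vm_map_insert()/vm_map_find(), declared below.  The offset,
 * protections and flag combination here are placeholder assumptions; the
 * function name is hypothetical.
 */
#if 0
static int
example_map_object(vm_map_t map, vm_object_t object, vm_size_t size)
{
        vm_offset_t addr = 0;

        return (vm_map_find(map, object, 0, &addr, size,
            TRUE,               /* let vm_map_find() pick a free range */
            VM_PROT_READ | VM_PROT_WRITE, VM_PROT_ALL,
            MAP_COPY_ON_WRITE | MAP_PREFAULT_PARTIAL));
}
#endif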

/*
 * vm_fault option flags
 */
#define	VM_FAULT_NORMAL		0	/* Nothing special */
#define	VM_FAULT_CHANGE_WIRING	1	/* Change the wiring as appropriate */
#define	VM_FAULT_USER_WIRE	2	/* Likewise, but for user purposes */
#define	VM_FAULT_WIRE_MASK	(VM_FAULT_CHANGE_WIRING|VM_FAULT_USER_WIRE)
#define	VM_FAULT_HOLD		4	/* Hold the page */
#define	VM_FAULT_DIRTY		8	/* Dirty the page */
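
/*
 * Illustrative sketch (not compiled): these flags form the fault_flags
 * argument of vm_fault(), which is declared in <vm/vm_extern.h> rather
 * than in this header.  Wiring a page on behalf of a user request, for
 * example, issues a read fault combined with VM_FAULT_USER_WIRE.  The
 * function name is hypothetical.
 */
#if 0
static int
example_user_wire_page(vm_map_t map, vm_offset_t va)
{
        return (vm_fault(map, trunc_page(va), VM_PROT_READ,
            VM_FAULT_USER_WIRE));
}
#endif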

#ifdef _KERNEL
boolean_t vm_map_check_protection __P((vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t));
struct pmap;
vm_map_t vm_map_create __P((struct pmap *, vm_offset_t, vm_offset_t));
int vm_map_delete __P((vm_map_t, vm_offset_t, vm_offset_t));
int vm_map_find __P((vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t *, vm_size_t, boolean_t, vm_prot_t, vm_prot_t, int));
int vm_map_findspace __P((vm_map_t, vm_offset_t, vm_size_t, vm_offset_t *));
int vm_map_inherit __P((vm_map_t, vm_offset_t, vm_offset_t, vm_inherit_t));
void vm_map_init __P((struct vm_map *, vm_offset_t, vm_offset_t));
int vm_map_insert __P((vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t, vm_offset_t, vm_prot_t, vm_prot_t, int));
int vm_map_lookup __P((vm_map_t *, vm_offset_t, vm_prot_t, vm_map_entry_t *, vm_object_t *,
    vm_pindex_t *, vm_prot_t *, boolean_t *));
void vm_map_lookup_done __P((vm_map_t, vm_map_entry_t));
boolean_t vm_map_lookup_entry __P((vm_map_t, vm_offset_t, vm_map_entry_t *));
int vm_map_pageable __P((vm_map_t, vm_offset_t, vm_offset_t, boolean_t));
int vm_map_user_pageable __P((vm_map_t, vm_offset_t, vm_offset_t, boolean_t));
int vm_map_clean __P((vm_map_t, vm_offset_t, vm_offset_t, boolean_t, boolean_t));
int vm_map_protect __P((vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t));
int vm_map_remove __P((vm_map_t, vm_offset_t, vm_offset_t));
void vm_map_startup __P((void));
int vm_map_submap __P((vm_map_t, vm_offset_t, vm_offset_t, vm_map_t));
int vm_map_madvise __P((vm_map_t, vm_offset_t, vm_offset_t, int));
void vm_map_simplify_entry __P((vm_map_t, vm_map_entry_t));
void vm_init2 __P((void));
int vm_uiomove __P((vm_map_t, vm_object_t, off_t, int, vm_offset_t, int *));
void vm_freeze_copyopts __P((vm_object_t, vm_pindex_t, vm_pindex_t));
int vm_map_stack __P((vm_map_t, vm_offset_t, vm_size_t, vm_prot_t, vm_prot_t, int));
int vm_map_growstack __P((struct proc *p, vm_offset_t addr));
int vmspace_swap_count __P((struct vmspace *vmspace));
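
/*
 * Illustrative sketch (not compiled): vm_map_lookup() returns with the
 * resolved map read-locked on success (the map pointer may be rewritten
 * for submap entries), so every successful lookup must be paired with
 * vm_map_lookup_done().  The function name is hypothetical.
 */
#if 0
static int
example_resolve_backing_page(vm_map_t map, vm_offset_t va)
{
        vm_map_entry_t entry;
        vm_object_t object;
        vm_pindex_t pindex;
        vm_prot_t prot;
        boolean_t wired;
        int rv;

        rv = vm_map_lookup(&map, va, VM_PROT_READ, &entry, &object,
            &pindex, &prot, &wired);
        if (rv != KERN_SUCCESS)
                return (rv);
        /* ... 'object' and 'pindex' identify the backing page here ... */
        vm_map_lookup_done(map, entry);
        return (KERN_SUCCESS);
}
#endif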

#endif
#endif				/* _VM_MAP_ */