1 /*
2  * (MPSAFE)
3  *
4  * Copyright (c) 1991, 1993
5  *      The Regents of the University of California.  All rights reserved.
6  *
7  * This code is derived from software contributed to Berkeley by
8  * The Mach Operating System project at Carnegie-Mellon University.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *      from: @(#)vm_map.c      8.3 (Berkeley) 1/12/94
35  *
36  *
37  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
38  * All rights reserved.
39  *
40  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
41  *
42  * Permission to use, copy, modify and distribute this software and
43  * its documentation is hereby granted, provided that both the copyright
44  * notice and this permission notice appear in all copies of the
45  * software, derivative works or modified versions, and any portions
46  * thereof, and that both notices appear in supporting documentation.
47  *
48  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
49  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
50  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
51  *
52  * Carnegie Mellon requests users of this software to return to
53  *
54  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
55  *  School of Computer Science
56  *  Carnegie Mellon University
57  *  Pittsburgh PA 15213-3890
58  *
59  * any improvements or extensions that they make and grant Carnegie the
60  * rights to redistribute these changes.
61  *
62  * $FreeBSD: src/sys/vm/vm_map.c,v 1.187.2.19 2003/05/27 00:47:02 alc Exp $
63  */
64
65 /*
66  *      Virtual memory mapping module.
67  */
68
69 #include <sys/param.h>
70 #include <sys/systm.h>
71 #include <sys/kernel.h>
72 #include <sys/proc.h>
73 #include <sys/serialize.h>
74 #include <sys/lock.h>
75 #include <sys/vmmeter.h>
76 #include <sys/mman.h>
77 #include <sys/vnode.h>
78 #include <sys/resourcevar.h>
79 #include <sys/shm.h>
80 #include <sys/tree.h>
81 #include <sys/malloc.h>
82 #include <sys/objcache.h>
83
84 #include <vm/vm.h>
85 #include <vm/vm_param.h>
86 #include <vm/pmap.h>
87 #include <vm/vm_map.h>
88 #include <vm/vm_page.h>
89 #include <vm/vm_object.h>
90 #include <vm/vm_pager.h>
91 #include <vm/vm_kern.h>
92 #include <vm/vm_extern.h>
93 #include <vm/swap_pager.h>
94 #include <vm/vm_zone.h>
95
96 #include <sys/thread2.h>
97 #include <sys/random.h>
98 #include <sys/sysctl.h>
99
100 /*
101  * Virtual memory maps provide for the mapping, protection, and sharing
102  * of virtual memory objects.  In addition, this module provides for an
103  * efficient virtual copy of memory from one map to another.
104  *
105  * Synchronization is required prior to most operations.
106  *
107  * Maps consist of an ordered doubly-linked list of simple entries.
108  * A hint and an RB tree are used to speed up lookups.
109  *
110  * Callers looking to modify maps specify start/end addresses which cause
111  * the related map entry to be clipped if necessary, and then later
112  * recombined if the pieces remain compatible.
113  *
114  * Virtual copy operations are performed by copying VM object references
115  * from one map to another, and then marking both regions as copy-on-write.
116  */
117 static boolean_t vmspace_ctor(void *obj, void *privdata, int ocflags);
118 static void vmspace_dtor(void *obj, void *privdata);
119 static void vmspace_terminate(struct vmspace *vm, int final);
120
121 MALLOC_DEFINE(M_VMSPACE, "vmspace", "vmspace objcache backingstore");
122 static struct objcache *vmspace_cache;
123
124 /*
125  * per-cpu page table cross mappings are initialized in early boot
126  * and might require a considerable number of vm_map_entry structures.
127  */
128 #define MAPENTRYBSP_CACHE       (MAXCPU+1)
129 #define MAPENTRYAP_CACHE        8
130
131 static struct vm_zone mapentzone_store, mapzone_store;
132 static vm_zone_t mapentzone, mapzone;
133 static struct vm_object mapentobj, mapobj;
134
135 static struct vm_map_entry map_entry_init[MAX_MAPENT];
136 static struct vm_map_entry cpu_map_entry_init_bsp[MAPENTRYBSP_CACHE];
137 static struct vm_map_entry cpu_map_entry_init_ap[MAXCPU][MAPENTRYAP_CACHE];
138 static struct vm_map map_init[MAX_KMAP];
139
140 static int randomize_mmap;
141 SYSCTL_INT(_vm, OID_AUTO, randomize_mmap, CTLFLAG_RW, &randomize_mmap, 0,
142     "Randomize mmap offsets");
143 static int vm_map_relock_enable = 1;
144 SYSCTL_INT(_vm, OID_AUTO, map_relock_enable, CTLFLAG_RW,
145            &vm_map_relock_enable, 0, "Enable prefault relocking optimization");
146
147 static void vm_map_entry_shadow(vm_map_entry_t entry, int addref);
148 static vm_map_entry_t vm_map_entry_create(vm_map_t map, int *);
149 static void vm_map_entry_dispose (vm_map_t map, vm_map_entry_t entry, int *);
150 static void _vm_map_clip_end (vm_map_t, vm_map_entry_t, vm_offset_t, int *);
151 static void _vm_map_clip_start (vm_map_t, vm_map_entry_t, vm_offset_t, int *);
152 static void vm_map_entry_delete (vm_map_t, vm_map_entry_t, int *);
153 static void vm_map_entry_unwire (vm_map_t, vm_map_entry_t);
154 static void vm_map_copy_entry (vm_map_t, vm_map_t, vm_map_entry_t,
155                 vm_map_entry_t);
156 static void vm_map_unclip_range (vm_map_t map, vm_map_entry_t start_entry, vm_offset_t start, vm_offset_t end, int *count, int flags);
157
158 /*
159  * Initialize the vm_map module.  Must be called before any other vm_map
160  * routines.
161  *
162  * Map and entry structures are allocated from the general purpose
163  * memory pool with some exceptions:
164  *
165  *      - The kernel map is allocated statically.
166  *      - Initial kernel map entries are allocated out of a static pool.
167  *      - We must set ZONE_SPECIAL here or the early boot code can get
168  *        stuck if there are >63 cores.
169  *
170  *      These restrictions are necessary since malloc() uses the
171  *      maps and requires map entries.
172  *
173  * Called from the low level boot code only.
174  */
175 void
176 vm_map_startup(void)
177 {
178         mapzone = &mapzone_store;
179         zbootinit(mapzone, "MAP", sizeof (struct vm_map),
180                 map_init, MAX_KMAP);
181         mapentzone = &mapentzone_store;
182         zbootinit(mapentzone, "MAP ENTRY", sizeof (struct vm_map_entry),
183                   map_entry_init, MAX_MAPENT);
184         mapentzone_store.zflags |= ZONE_SPECIAL;
185 }
186
187 /*
188  * Called prior to any vmspace allocations.
189  *
190  * Called from the low level boot code only.
191  */
192 void
193 vm_init2(void) 
194 {
195         vmspace_cache = objcache_create_mbacked(M_VMSPACE,
196                                                 sizeof(struct vmspace),
197                                                 0, ncpus * 4,
198                                                 vmspace_ctor, vmspace_dtor,
199                                                 NULL);
200         zinitna(mapentzone, &mapentobj, NULL, 0, 0, 
201                 ZONE_USE_RESERVE | ZONE_SPECIAL, 1);
202         zinitna(mapzone, &mapobj, NULL, 0, 0, 0, 1);
203         pmap_init2();
204         vm_object_init2();
205 }
206
207 /*
208  * objcache support.  We leave the pmap root cached as long as possible
209  * for performance reasons.
210  */
211 static
212 boolean_t
213 vmspace_ctor(void *obj, void *privdata, int ocflags)
214 {
215         struct vmspace *vm = obj;
216
217         bzero(vm, sizeof(*vm));
218         vm->vm_refcnt = (u_int)-1;
219
220         return 1;
221 }
222
223 static
224 void
225 vmspace_dtor(void *obj, void *privdata)
226 {
227         struct vmspace *vm = obj;
228
229         KKASSERT(vm->vm_refcnt == (u_int)-1);
230         pmap_puninit(vmspace_pmap(vm));
231 }
232
233 /*
234  * Red black tree functions
235  *
236  * The caller must hold the related map lock.
237  */
238 static int rb_vm_map_compare(vm_map_entry_t a, vm_map_entry_t b);
239 RB_GENERATE(vm_map_rb_tree, vm_map_entry, rb_entry, rb_vm_map_compare);
240
241 /* a->start is the address; it is the only field that must be initialized */
242 static int
243 rb_vm_map_compare(vm_map_entry_t a, vm_map_entry_t b)
244 {
245         if (a->start < b->start)
246                 return(-1);
247         else if (a->start > b->start)
248                 return(1);
249         return(0);
250 }
251
252 /*
253  * Initialize the ref/hold counts of a vmspace (including vmspace0).
254  * There is a holdcnt for every refcnt.
255  */
256 void
257 vmspace_initrefs(struct vmspace *vm)
258 {
259         vm->vm_refcnt = 1;
260         vm->vm_holdcnt = 1;
261 }
262
263 /*
264  * Allocate a vmspace structure, including a vm_map and pmap.
265  * Initialize numerous fields.  While the initial allocation is zeroed,
266  * subsequent reuse from the objcache leaves elements of the structure
267  * intact (particularly the pmap), so portions must be zeroed.
268  *
269  * Returns a referenced vmspace.
270  *
271  * No requirements.
272  */
273 struct vmspace *
274 vmspace_alloc(vm_offset_t min, vm_offset_t max)
275 {
276         struct vmspace *vm;
277
278         vm = objcache_get(vmspace_cache, M_WAITOK);
279
280         bzero(&vm->vm_startcopy,
281               (char *)&vm->vm_endcopy - (char *)&vm->vm_startcopy);
282         vm_map_init(&vm->vm_map, min, max, NULL);       /* initializes token */
283
284         /*
285          * NOTE: The hold acquires the vm_map token for safety.
286          *
287          * On return vmspace is referenced (refs=1, hold=1).  That is,
288          * each refcnt also has a holdcnt.  There can be additional holds
289          * (holdcnt) above and beyond the refcnt.  Finalization is handled in
290          * two stages, one on refs 1->0, and the second on hold 1->0.
291          */
292         KKASSERT(vm->vm_holdcnt == 0);
293         KKASSERT(vm->vm_refcnt == (u_int)-1);
294         vmspace_initrefs(vm);
295         vmspace_hold(vm);
296         pmap_pinit(vmspace_pmap(vm));           /* (some fields reused) */
297         vm->vm_map.pmap = vmspace_pmap(vm);     /* XXX */
298         vm->vm_shm = NULL;
299         vm->vm_flags = 0;
300         cpu_vmspace_alloc(vm);
301         vmspace_drop(vm);
302
303         return (vm);
304 }
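
/*
 * Example (illustrative sketch only; the address bounds are placeholders):
 * a caller pairs vmspace_alloc() with vmspace_rel().  vmspace_alloc()
 * returns the vmspace with refs=1 and hold=1; releasing the last ref via
 * vmspace_rel() begins stage-1 termination.
 *
 *      struct vmspace *vm;
 *
 *      vm = vmspace_alloc(VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
 *      ... use vm->vm_map and vmspace_pmap(vm) ...
 *      vmspace_rel(vm);
 */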
305
306 /*
307  * NOTE: Can return -1 if the vmspace is exiting.
308  */
309 int
310 vmspace_getrefs(struct vmspace *vm)
311 {
312         return ((int)vm->vm_refcnt);
313 }
314
315 /*
316  * A vmspace object must already have a non-zero hold to be able to gain
317  * further holds on it.
318  */
319 static void
320 vmspace_hold_notoken(struct vmspace *vm)
321 {
322         KKASSERT(vm->vm_holdcnt != 0);
323         refcount_acquire(&vm->vm_holdcnt);
324 }
325
326 static void
327 vmspace_drop_notoken(struct vmspace *vm)
328 {
329         if (refcount_release(&vm->vm_holdcnt)) {
330                 if (vm->vm_refcnt == (u_int)-1) {
331                         vmspace_terminate(vm, 1);
332                 }
333         }
334 }
335
336 void
337 vmspace_hold(struct vmspace *vm)
338 {
339         vmspace_hold_notoken(vm);
340         lwkt_gettoken(&vm->vm_map.token);
341 }
342
343 void
344 vmspace_drop(struct vmspace *vm)
345 {
346         lwkt_reltoken(&vm->vm_map.token);
347         vmspace_drop_notoken(vm);
348 }
349
350 /*
351  * A vmspace object must not be in a terminated state to be able to obtain
352  * additional refs on it.
353  *
354  * Ref'ing a vmspace object also increments its hold count.
355  */
356 void
357 vmspace_ref(struct vmspace *vm)
358 {
359         KKASSERT((int)vm->vm_refcnt >= 0);
360         vmspace_hold_notoken(vm);
361         refcount_acquire(&vm->vm_refcnt);
362 }
363
364 /*
365  * Release a ref on the vmspace.  On the 1->0 transition we do stage-1
366  * termination of the vmspace.  Then, on the final drop of the hold we
367  * will do stage-2 final termination.
368  */
369 void
370 vmspace_rel(struct vmspace *vm)
371 {
372         if (refcount_release(&vm->vm_refcnt)) {
373                 vm->vm_refcnt = (u_int)-1;      /* no other refs possible */
374                 vmspace_terminate(vm, 0);
375         }
376         vmspace_drop_notoken(vm);
377 }
378
379 /*
380  * This is called during exit indicating that the vmspace is no
381  * longer in use by an exiting process, but the process has not yet
382  * been reaped.
383  *
384  * We release the refcnt but not the associated holdcnt.
385  *
386  * No requirements.
387  */
388 void
389 vmspace_relexit(struct vmspace *vm)
390 {
391         if (refcount_release(&vm->vm_refcnt)) {
392                 vm->vm_refcnt = (u_int)-1;      /* no other refs possible */
393                 vmspace_terminate(vm, 0);
394         }
395 }
396
397 /*
398  * Called during reap to disconnect the remainder of the vmspace from
399  * the process.  When the last hold is dropped, termination is finalized.
400  *
401  * No requirements.
402  */
403 void
404 vmspace_exitfree(struct proc *p)
405 {
406         struct vmspace *vm;
407
408         vm = p->p_vmspace;
409         p->p_vmspace = NULL;
410         vmspace_drop_notoken(vm);
411 }
412
413 /*
414  * Called in two cases:
415  *
416  * (1) When the last refcnt is dropped and the vmspace becomes inactive,
417  *     called with final == 0.  refcnt will be (u_int)-1 at this point,
418  *     and holdcnt will still be non-zero.
419  *
420  * (2) When holdcnt becomes 0, called with final == 1.  There should no
421  *     longer be anyone with access to the vmspace.
422  *
423  * VMSPACE_EXIT1 flags the primary deactivation
424  * VMSPACE_EXIT2 flags the last reap
425  */
426 static void
427 vmspace_terminate(struct vmspace *vm, int final)
428 {
429         int count;
430
431         lwkt_gettoken(&vm->vm_map.token);
432         if (final == 0) {
433                 KKASSERT((vm->vm_flags & VMSPACE_EXIT1) == 0);
434
435                 /*
436                  * Get rid of most of the resources.  Leave the kernel pmap
437                  * intact.
438                  *
439                  * If the pmap does not contain wired pages we can bulk-delete the
440                  * pmap as a performance optimization before removing the mappings.
441                  *
442                  * If the pmap contains wired pages we cannot do this pre-optimization
443                  * because currently vm_fault_unwire() expects the pmap pages to exist
444                  * and will not decrement p->wire_count if they do not.
445                  */
446                 vm->vm_flags |= VMSPACE_EXIT1;
447                 shmexit(vm);
448                 if (vmspace_pmap(vm)->pm_stats.wired_count) {
449                         vm_map_remove(&vm->vm_map, VM_MIN_USER_ADDRESS,
450                                       VM_MAX_USER_ADDRESS);
451                         pmap_remove_pages(vmspace_pmap(vm), VM_MIN_USER_ADDRESS,
452                                           VM_MAX_USER_ADDRESS);
453                 } else {
454                         pmap_remove_pages(vmspace_pmap(vm), VM_MIN_USER_ADDRESS,
455                                           VM_MAX_USER_ADDRESS);
456                         vm_map_remove(&vm->vm_map, VM_MIN_USER_ADDRESS,
457                                       VM_MAX_USER_ADDRESS);
458                 }
459                 lwkt_reltoken(&vm->vm_map.token);
460         } else {
461                 KKASSERT((vm->vm_flags & VMSPACE_EXIT1) != 0);
462                 KKASSERT((vm->vm_flags & VMSPACE_EXIT2) == 0);
463
464                 /*
465                  * Get rid of remaining basic resources.
466                  */
467                 vm->vm_flags |= VMSPACE_EXIT2;
468                 cpu_vmspace_free(vm);
469                 shmexit(vm);
470
471                 /*
472                  * Lock the map, to wait out all other references to it.
473                  * Delete all of the mappings and pages they hold, then call
474                  * the pmap module to reclaim anything left.
475                  */
476                 count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
477                 vm_map_lock(&vm->vm_map);
478                 vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
479                               vm->vm_map.max_offset, &count);
480                 vm_map_unlock(&vm->vm_map);
481                 vm_map_entry_release(count);
482
483                 lwkt_gettoken(&vmspace_pmap(vm)->pm_token);
484                 pmap_release(vmspace_pmap(vm));
485                 lwkt_reltoken(&vmspace_pmap(vm)->pm_token);
486                 lwkt_reltoken(&vm->vm_map.token);
487                 objcache_put(vmspace_cache, vm);
488         }
489 }
490
491 /*
492  * Swap usage is determined by taking the proportional swap used by
493  * VM objects backing the VM map.  To make up for fractional losses,
494  * if the VM object has any swap use at all the associated map entries
495  * count for at least 1 swap page.
496  *
497  * No requirements.
498  */
499 int
500 vmspace_swap_count(struct vmspace *vm)
501 {
502         vm_map_t map = &vm->vm_map;
503         vm_map_entry_t cur;
504         vm_object_t object;
505         int count = 0;
506         int n;
507
508         vmspace_hold(vm);
509         for (cur = map->header.next; cur != &map->header; cur = cur->next) {
510                 switch(cur->maptype) {
511                 case VM_MAPTYPE_NORMAL:
512                 case VM_MAPTYPE_VPAGETABLE:
513                         if ((object = cur->object.vm_object) == NULL)
514                                 break;
515                         if (object->swblock_count) {
516                                 n = (cur->end - cur->start) / PAGE_SIZE;
517                                 count += object->swblock_count *
518                                     SWAP_META_PAGES * n / object->size + 1;
519                         }
520                         break;
521                 default:
522                         break;
523                 }
524         }
525         vmspace_drop(vm);
526
527         return(count);
528 }
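
/*
 * For example (illustrative numbers only): a 1024-page object with 4 swap
 * blocks (SWAP_META_PAGES pages each) mapped across a 512-page entry
 * contributes roughly 4 * SWAP_META_PAGES * 512 / 1024 + 1 swap pages to
 * the total, per the proportional calculation above.
 */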
529
530 /*
531  * Calculate the approximate number of anonymous pages in use by
532  * this vmspace.  To make up for fractional losses, we count each
533  * VM object as having at least 1 anonymous page.
534  *
535  * No requirements.
536  */
537 int
538 vmspace_anonymous_count(struct vmspace *vm)
539 {
540         vm_map_t map = &vm->vm_map;
541         vm_map_entry_t cur;
542         vm_object_t object;
543         int count = 0;
544
545         vmspace_hold(vm);
546         for (cur = map->header.next; cur != &map->header; cur = cur->next) {
547                 switch(cur->maptype) {
548                 case VM_MAPTYPE_NORMAL:
549                 case VM_MAPTYPE_VPAGETABLE:
550                         if ((object = cur->object.vm_object) == NULL)
551                                 break;
552                         if (object->type != OBJT_DEFAULT &&
553                             object->type != OBJT_SWAP) {
554                                 break;
555                         }
556                         count += object->resident_page_count;
557                         break;
558                 default:
559                         break;
560                 }
561         }
562         vmspace_drop(vm);
563
564         return(count);
565 }
566
567 /*
568  * Creates and returns a new empty VM map with the given physical map
569  * structure, and having the given lower and upper address bounds.
570  *
571  * No requirements.
572  */
573 vm_map_t
574 vm_map_create(vm_map_t result, pmap_t pmap, vm_offset_t min, vm_offset_t max)
575 {
576         if (result == NULL)
577                 result = zalloc(mapzone);
578         vm_map_init(result, min, max, pmap);
579         return (result);
580 }
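
/*
 * Example (illustrative sketch only; the pmap and address bounds are
 * placeholders): passing NULL as the result causes vm_map_create() to
 * allocate the map from mapzone before initializing it.
 *
 *      vm_map_t map;
 *
 *      map = vm_map_create(NULL, pmap, VM_MIN_USER_ADDRESS,
 *                          VM_MAX_USER_ADDRESS);
 */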
581
582 /*
583  * Initialize an existing vm_map structure such as that in the vmspace
584  * structure.  The pmap is initialized elsewhere.
585  *
586  * No requirements.
587  */
588 void
589 vm_map_init(struct vm_map *map, vm_offset_t min, vm_offset_t max, pmap_t pmap)
590 {
591         map->header.next = map->header.prev = &map->header;
592         RB_INIT(&map->rb_root);
593         map->nentries = 0;
594         map->size = 0;
595         map->system_map = 0;
596         map->min_offset = min;
597         map->max_offset = max;
598         map->pmap = pmap;
599         map->first_free = &map->header;
600         map->hint = &map->header;
601         map->timestamp = 0;
602         map->flags = 0;
603         lwkt_token_init(&map->token, "vm_map");
604         lockinit(&map->lock, "vm_maplk", (hz + 9) / 10, 0);
605 }
606
607 /*
608  * Shadow the vm_map_entry's object.  This typically needs to be done when
609  * a write fault is taken on an entry which had previously been cloned by
610  * fork().  The shared object (which might be NULL) must become private so
611  * we add a shadow layer above it.
612  *
613  * Object allocation for anonymous mappings is deferred as long as possible.
614  * When creating a shadow, however, the underlying object must be instantiated
615  * so it can be shared.
616  *
617  * If the map segment is governed by a virtual page table then it is
618  * possible to address offsets beyond the mapped area.  Just allocate
619  * a maximally sized object for this case.
620  *
621  * If addref is non-zero an additional reference is added to the returned
622  * entry.  This mechanic exists because the additional reference might have
623  * to be added atomically and not after return to prevent a premature
624  * collapse.
625  *
626  * The vm_map must be exclusively locked.
627  * No other requirements.
628  */
629 static
630 void
631 vm_map_entry_shadow(vm_map_entry_t entry, int addref)
632 {
633         if (entry->maptype == VM_MAPTYPE_VPAGETABLE) {
634                 vm_object_shadow(&entry->object.vm_object, &entry->offset,
635                                  0x7FFFFFFF, addref);   /* XXX */
636         } else {
637                 vm_object_shadow(&entry->object.vm_object, &entry->offset,
638                                  atop(entry->end - entry->start), addref);
639         }
640         entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
641 }
642
643 /*
644  * Allocate an object for a vm_map_entry.
645  *
646  * Object allocation for anonymous mappings is deferred as long as possible.
647  * This function is called when we can defer no longer, generally when a map
648  * entry might be split or forked or takes a page fault.
649  *
650  * If the map segment is governed by a virtual page table then it is
651  * possible to address offsets beyond the mapped area.  Just allocate
652  * a maximally sized object for this case.
653  *
654  * The vm_map must be exclusively locked.
655  * No other requirements.
656  */
657 void 
658 vm_map_entry_allocate_object(vm_map_entry_t entry)
659 {
660         vm_object_t obj;
661
662         if (entry->maptype == VM_MAPTYPE_VPAGETABLE) {
663                 obj = vm_object_allocate(OBJT_DEFAULT, 0x7FFFFFFF); /* XXX */
664         } else {
665                 obj = vm_object_allocate(OBJT_DEFAULT,
666                                          atop(entry->end - entry->start));
667         }
668         entry->object.vm_object = obj;
669         entry->offset = 0;
670 }
671
672 /*
673  * Set an initial negative count so the first attempt to reserve
674  * space preloads a bunch of vm_map_entry's for this cpu.  Also
675  * pre-allocate 2 vm_map_entries which will be needed by zalloc() to
676  * map a new page for vm_map_entry structures.  SMP systems are
677  * particularly sensitive.
678  *
679  * This routine is called in early boot so we cannot just call
680  * vm_map_entry_reserve().
681  *
682  * Called from the low level boot code only (for each cpu)
683  *
684  * WARNING! Take care not to have too-big a static/BSS structure here
685  *          as MAXCPU can be 256+, otherwise the loader's 64MB heap
686  *          can get blown out by the kernel plus the initrd image.
687  */
688 void
689 vm_map_entry_reserve_cpu_init(globaldata_t gd)
690 {
691         vm_map_entry_t entry;
692         int count;
693         int i;
694
695         gd->gd_vme_avail -= MAP_RESERVE_COUNT * 2;
696         if (gd->gd_cpuid == 0) {
697                 entry = &cpu_map_entry_init_bsp[0];
698                 count = MAPENTRYBSP_CACHE;
699         } else {
700                 entry = &cpu_map_entry_init_ap[gd->gd_cpuid][0];
701                 count = MAPENTRYAP_CACHE;
702         }
703         for (i = 0; i < count; ++i, ++entry) {
704                 entry->next = gd->gd_vme_base;
705                 gd->gd_vme_base = entry;
706         }
707 }
708
709 /*
710  * Reserves vm_map_entry structures so code later on can manipulate
711  * map_entry structures within a locked map without blocking trying
712  * to allocate a new vm_map_entry.
713  *
714  * No requirements.
715  */
716 int
717 vm_map_entry_reserve(int count)
718 {
719         struct globaldata *gd = mycpu;
720         vm_map_entry_t entry;
721
722         /*
723          * Make sure we have enough structures in gd_vme_base to handle
724          * the reservation request.
725          *
726          * The critical section protects access to the per-cpu gd.
727          */
728         crit_enter();
729         while (gd->gd_vme_avail < count) {
730                 entry = zalloc(mapentzone);
731                 entry->next = gd->gd_vme_base;
732                 gd->gd_vme_base = entry;
733                 ++gd->gd_vme_avail;
734         }
735         gd->gd_vme_avail -= count;
736         crit_exit();
737
738         return(count);
739 }
740
741 /*
742  * Releases previously reserved vm_map_entry structures that were not
743  * used.  If we have too much junk in our per-cpu cache clean some of
744  * it out.
745  *
746  * No requirements.
747  */
748 void
749 vm_map_entry_release(int count)
750 {
751         struct globaldata *gd = mycpu;
752         vm_map_entry_t entry;
753
754         crit_enter();
755         gd->gd_vme_avail += count;
756         while (gd->gd_vme_avail > MAP_RESERVE_SLOP) {
757                 entry = gd->gd_vme_base;
758                 KKASSERT(entry != NULL);
759                 gd->gd_vme_base = entry->next;
760                 --gd->gd_vme_avail;
761                 crit_exit();
762                 zfree(mapentzone, entry);
763                 crit_enter();
764         }
765         crit_exit();
766 }
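
/*
 * Example (illustrative sketch of the reservation pattern used by callers
 * such as vmspace_terminate() above): reserve entries before locking the
 * map so entry manipulation cannot block on allocation, then return any
 * unused reservations afterwards.
 *
 *      int count;
 *
 *      count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
 *      vm_map_lock(map);
 *      ... clip / insert / delete entries, passing &count ...
 *      vm_map_unlock(map);
 *      vm_map_entry_release(count);
 */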
767
768 /*
769  * Reserve map entry structures for use in kernel_map itself.  These
770  * entries have *ALREADY* been reserved on a per-cpu basis when the map
771  * was inited.  This function is used by zalloc() to avoid a recursion
772  * when zalloc() itself needs to allocate additional kernel memory.
773  *
774  * This function works like the normal reserve but does not load the
775  * vm_map_entry cache (because that would result in an infinite
776  * recursion).  Note that gd_vme_avail may go negative.  This is expected.
777  *
778  * Any caller of this function must be sure to renormalize after
779  * potentially eating entries to ensure that the reserve supply
780  * remains intact.
781  *
782  * No requirements.
783  */
784 int
785 vm_map_entry_kreserve(int count)
786 {
787         struct globaldata *gd = mycpu;
788
789         crit_enter();
790         gd->gd_vme_avail -= count;
791         crit_exit();
792         KASSERT(gd->gd_vme_base != NULL,
793                 ("no reserved entries left, gd_vme_avail = %d",
794                 gd->gd_vme_avail));
795         return(count);
796 }
797
798 /*
799  * Release previously reserved map entries for kernel_map.  We do not
800  * attempt to clean up like the normal release function as this would
801  * cause an unnecessary (but probably not fatal) deep procedure call.
802  *
803  * No requirements.
804  */
805 void
806 vm_map_entry_krelease(int count)
807 {
808         struct globaldata *gd = mycpu;
809
810         crit_enter();
811         gd->gd_vme_avail += count;
812         crit_exit();
813 }
814
815 /*
816  * Allocates a VM map entry for insertion.  No entry fields are filled in.
817  *
818  * The entries should have previously been reserved.  The reservation count
819  * is tracked in (*countp).
820  *
821  * No requirements.
822  */
823 static vm_map_entry_t
824 vm_map_entry_create(vm_map_t map, int *countp)
825 {
826         struct globaldata *gd = mycpu;
827         vm_map_entry_t entry;
828
829         KKASSERT(*countp > 0);
830         --*countp;
831         crit_enter();
832         entry = gd->gd_vme_base;
833         KASSERT(entry != NULL, ("gd_vme_base NULL! count %d", *countp));
834         gd->gd_vme_base = entry->next;
835         crit_exit();
836
837         return(entry);
838 }
839
840 /*
841  * Dispose of a vm_map_entry that is no longer being referenced.
842  *
843  * No requirements.
844  */
845 static void
846 vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry, int *countp)
847 {
848         struct globaldata *gd = mycpu;
849
850         KKASSERT(map->hint != entry);
851         KKASSERT(map->first_free != entry);
852
853         ++*countp;
854         crit_enter();
855         entry->next = gd->gd_vme_base;
856         gd->gd_vme_base = entry;
857         crit_exit();
858 }
859
860
861 /*
862  * Insert/remove entries from maps.
863  *
864  * The related map must be exclusively locked.
865  * The caller must hold map->token
866  * No other requirements.
867  */
868 static __inline void
869 vm_map_entry_link(vm_map_t map,
870                   vm_map_entry_t after_where,
871                   vm_map_entry_t entry)
872 {
873         ASSERT_VM_MAP_LOCKED(map);
874
875         map->nentries++;
876         entry->prev = after_where;
877         entry->next = after_where->next;
878         entry->next->prev = entry;
879         after_where->next = entry;
880         if (vm_map_rb_tree_RB_INSERT(&map->rb_root, entry))
881                 panic("vm_map_entry_link: dup addr map %p ent %p", map, entry);
882 }
883
884 static __inline void
885 vm_map_entry_unlink(vm_map_t map,
886                     vm_map_entry_t entry)
887 {
888         vm_map_entry_t prev;
889         vm_map_entry_t next;
890
891         ASSERT_VM_MAP_LOCKED(map);
892
893         if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
894                 panic("vm_map_entry_unlink: attempt to mess with "
895                       "locked entry! %p", entry);
896         }
897         prev = entry->prev;
898         next = entry->next;
899         next->prev = prev;
900         prev->next = next;
901         vm_map_rb_tree_RB_REMOVE(&map->rb_root, entry);
902         map->nentries--;
903 }
904
905 /*
906  * Finds the map entry containing (or immediately preceding) the specified
907  * address in the given map.  The entry is returned in (*entry).
908  *
909  * The boolean result indicates whether the address is actually contained
910  * in the map.
911  *
912  * The related map must be locked.
913  * No other requirements.
914  */
915 boolean_t
916 vm_map_lookup_entry(vm_map_t map, vm_offset_t address, vm_map_entry_t *entry)
917 {
918         vm_map_entry_t tmp;
919         vm_map_entry_t last;
920
921         ASSERT_VM_MAP_LOCKED(map);
922 #if 0
923         /*
924          * XXX TEMPORARILY DISABLED.  For some reason our attempt to revive
925          * the hint code with the red-black lookup meets with system crashes
926          * and lockups.  We do not yet know why.
927          *
928          * It is possible that the problem is related to the setting
929          * of the hint during map_entry deletion, in the code specified
930          * at the GGG comment later on in this file.
931          *
932          * YYY More likely it's because this function can be called with
933          * a shared lock on the map, resulting in map->hint updates possibly
934          * racing.  Fixed now but untested.
935          */
936         /*
937          * Quickly check the cached hint, there's a good chance of a match.
938          */
939         tmp = map->hint;
940         cpu_ccfence();
941         if (tmp != &map->header) {
942                 if (address >= tmp->start && address < tmp->end) {
943                         *entry = tmp;
944                         return(TRUE);
945                 }
946         }
947 #endif
948
949         /*
950          * Locate the record from the top of the tree.  'last' tracks the
951          * closest prior record and is returned if no match is found, which
952          * in binary tree terms means tracking the most recent right-branch
953          * taken.  If there is no prior record, &map->header is returned.
954          */
955         last = &map->header;
956         tmp = RB_ROOT(&map->rb_root);
957
958         while (tmp) {
959                 if (address >= tmp->start) {
960                         if (address < tmp->end) {
961                                 *entry = tmp;
962                                 map->hint = tmp;
963                                 return(TRUE);
964                         }
965                         last = tmp;
966                         tmp = RB_RIGHT(tmp, rb_entry);
967                 } else {
968                         tmp = RB_LEFT(tmp, rb_entry);
969                 }
970         }
971         *entry = last;
972         return (FALSE);
973 }
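
/*
 * Example (illustrative sketch only; 'map' and 'addr' are placeholders):
 * looking up the entry covering an address while holding the map lock.
 * On a miss, *entry refers to the immediately preceding entry (or to
 * &map->header).
 *
 *      vm_map_entry_t entry;
 *
 *      vm_map_lock(map);
 *      if (vm_map_lookup_entry(map, addr, &entry)) {
 *              ... addr lies within [entry->start, entry->end) ...
 *      }
 *      vm_map_unlock(map);
 */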
974
975 /*
976  * Inserts the given whole VM object into the target map at the specified
977  * address range.  The object's size should match that of the address range.
978  *
979  * The map must be exclusively locked.
980  * The object must be held.
981  * The caller must have reserved sufficient vm_map_entry structures.
982  *
983  * If object is non-NULL, ref count must be bumped by caller prior to
984  * making call to account for the new entry.
985  */
986 int
987 vm_map_insert(vm_map_t map, int *countp, void *map_object, void *map_aux,
988               vm_ooffset_t offset, vm_offset_t start, vm_offset_t end,
989               vm_maptype_t maptype,
990               vm_prot_t prot, vm_prot_t max, int cow)
991 {
992         vm_map_entry_t new_entry;
993         vm_map_entry_t prev_entry;
994         vm_map_entry_t temp_entry;
995         vm_eflags_t protoeflags;
996         int must_drop = 0;
997         vm_object_t object;
998
999         if (maptype == VM_MAPTYPE_UKSMAP)
1000                 object = NULL;
1001         else
1002                 object = map_object;
1003
1004         ASSERT_VM_MAP_LOCKED(map);
1005         if (object)
1006                 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
1007
1008         /*
1009          * Check that the start and end points are not bogus.
1010          */
1011         if ((start < map->min_offset) || (end > map->max_offset) ||
1012             (start >= end))
1013                 return (KERN_INVALID_ADDRESS);
1014
1015         /*
1016          * Find the entry prior to the proposed starting address; if it's part
1017          * of an existing entry, this range is bogus.
1018          */
1019         if (vm_map_lookup_entry(map, start, &temp_entry))
1020                 return (KERN_NO_SPACE);
1021
1022         prev_entry = temp_entry;
1023
1024         /*
1025          * Assert that the next entry doesn't overlap the end point.
1026          */
1027
1028         if ((prev_entry->next != &map->header) &&
1029             (prev_entry->next->start < end))
1030                 return (KERN_NO_SPACE);
1031
1032         protoeflags = 0;
1033
1034         if (cow & MAP_COPY_ON_WRITE)
1035                 protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY;
1036
1037         if (cow & MAP_NOFAULT) {
1038                 protoeflags |= MAP_ENTRY_NOFAULT;
1039
1040                 KASSERT(object == NULL,
1041                         ("vm_map_insert: paradoxical MAP_NOFAULT request"));
1042         }
1043         if (cow & MAP_DISABLE_SYNCER)
1044                 protoeflags |= MAP_ENTRY_NOSYNC;
1045         if (cow & MAP_DISABLE_COREDUMP)
1046                 protoeflags |= MAP_ENTRY_NOCOREDUMP;
1047         if (cow & MAP_IS_STACK)
1048                 protoeflags |= MAP_ENTRY_STACK;
1049         if (cow & MAP_IS_KSTACK)
1050                 protoeflags |= MAP_ENTRY_KSTACK;
1051
1052         lwkt_gettoken(&map->token);
1053
1054         if (object) {
1055                 /*
1056                  * When object is non-NULL, it could be shared with another
1057                  * process.  We have to set or clear OBJ_ONEMAPPING 
1058                  * appropriately.
1059                  *
1060                  * NOTE: This flag is only applicable to DEFAULT and SWAP
1061                  *       objects and will already be clear in other types
1062                  *       of objects, so a shared object lock is ok for
1063                  *       VNODE objects.
1064                  */
1065                 if ((object->ref_count > 1) || (object->shadow_count != 0)) {
1066                         vm_object_clear_flag(object, OBJ_ONEMAPPING);
1067                 }
1068         }
1069         else if ((prev_entry != &map->header) &&
1070                  (prev_entry->eflags == protoeflags) &&
1071                  (prev_entry->end == start) &&
1072                  (prev_entry->wired_count == 0) &&
1073                  prev_entry->maptype == maptype &&
1074                  maptype == VM_MAPTYPE_NORMAL &&
1075                  ((prev_entry->object.vm_object == NULL) ||
1076                   vm_object_coalesce(prev_entry->object.vm_object,
1077                                      OFF_TO_IDX(prev_entry->offset),
1078                                      (vm_size_t)(prev_entry->end - prev_entry->start),
1079                                      (vm_size_t)(end - prev_entry->end)))) {
1080                 /*
1081                  * We were able to extend the object.  Determine if we
1082                  * can extend the previous map entry to include the 
1083                  * new range as well.
1084                  */
1085                 if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
1086                     (prev_entry->protection == prot) &&
1087                     (prev_entry->max_protection == max)) {
1088                         map->size += (end - prev_entry->end);
1089                         prev_entry->end = end;
1090                         vm_map_simplify_entry(map, prev_entry, countp);
1091                         lwkt_reltoken(&map->token);
1092                         return (KERN_SUCCESS);
1093                 }
1094
1095                 /*
1096                  * If we can extend the object but cannot extend the
1097                  * map entry, we have to create a new map entry.  We
1098                  * must bump the ref count on the extended object to
1099                  * account for it.  object may be NULL.
1100                  *
1101                  * XXX if object is NULL should we set offset to 0 here ?
1102                  */
1103                 object = prev_entry->object.vm_object;
1104                 offset = prev_entry->offset +
1105                         (prev_entry->end - prev_entry->start);
1106                 if (object) {
1107                         vm_object_hold(object);
1108                         vm_object_chain_wait(object, 0);
1109                         vm_object_reference_locked(object);
1110                         must_drop = 1;
1111                         map_object = object;
1112                 }
1113         }
1114
1115         /*
1116          * NOTE: if conditionals fail, object can be NULL here.  This occurs
1117          * in things like the buffer map where we manage kva but do not manage
1118          * backing objects.
1119          */
1120
1121         /*
1122          * Create a new entry
1123          */
1124
1125         new_entry = vm_map_entry_create(map, countp);
1126         new_entry->start = start;
1127         new_entry->end = end;
1128
1129         new_entry->maptype = maptype;
1130         new_entry->eflags = protoeflags;
1131         new_entry->object.map_object = map_object;
1132         new_entry->aux.master_pde = 0;          /* in case size is different */
1133         new_entry->aux.map_aux = map_aux;
1134         new_entry->offset = offset;
1135
1136         new_entry->inheritance = VM_INHERIT_DEFAULT;
1137         new_entry->protection = prot;
1138         new_entry->max_protection = max;
1139         new_entry->wired_count = 0;
1140
1141         /*
1142          * Insert the new entry into the list
1143          */
1144
1145         vm_map_entry_link(map, prev_entry, new_entry);
1146         map->size += new_entry->end - new_entry->start;
1147
1148         /*
1149          * Update the free space hint.  Entries cannot overlap.
1150          * An exact comparison is needed to avoid matching
1151          * against the map->header.
1152          */
1153         if ((map->first_free == prev_entry) &&
1154             (prev_entry->end == new_entry->start)) {
1155                 map->first_free = new_entry;
1156         }
1157
1158 #if 0
1159         /*
1160          * Temporarily removed to avoid MAP_STACK panic, due to
1161          * MAP_STACK being a huge hack.  Will be added back in
1162          * when MAP_STACK (and the user stack mapping) is fixed.
1163          */
1164         /*
1165          * It may be possible to simplify the entry
1166          */
1167         vm_map_simplify_entry(map, new_entry, countp);
1168 #endif
1169
1170         /*
1171          * Try to pre-populate the page table.  Mappings governed by virtual
1172          * page tables cannot be prepopulated without a lot of work, so
1173          * don't try.
1174          */
1175         if ((cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) &&
1176             maptype != VM_MAPTYPE_VPAGETABLE &&
1177             maptype != VM_MAPTYPE_UKSMAP) {
1178                 int dorelock = 0;
1179                 if (vm_map_relock_enable && (cow & MAP_PREFAULT_RELOCK)) {
1180                         dorelock = 1;
1181                         vm_object_lock_swap();
1182                         vm_object_drop(object);
1183                 }
1184                 pmap_object_init_pt(map->pmap, start, prot,
1185                                     object, OFF_TO_IDX(offset), end - start,
1186                                     cow & MAP_PREFAULT_PARTIAL);
1187                 if (dorelock) {
1188                         vm_object_hold(object);
1189                         vm_object_lock_swap();
1190                 }
1191         }
1192         if (must_drop)
1193                 vm_object_drop(object);
1194
1195         lwkt_reltoken(&map->token);
1196         return (KERN_SUCCESS);
1197 }
1198
1199 /*
1200  * Find sufficient space for `length' bytes in the given map, starting at
1201  * `start'.  Returns 0 on success, 1 on no space.
1202  *
1203  * This function will return an arbitrarily aligned pointer.  If no
1204  * particular alignment is required you should pass align as 1.  Note that
1205  * the map may return PAGE_SIZE aligned pointers if all the lengths used in
1206  * the map are a multiple of PAGE_SIZE, even if you pass a smaller align
1207  * argument.
1208  *
1209  * 'align' should be a power of 2 but is not required to be.
1210  *
1211  * The map must be exclusively locked.
1212  * No other requirements.
1213  */
1214 int
1215 vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length,
1216                  vm_size_t align, int flags, vm_offset_t *addr)
1217 {
1218         vm_map_entry_t entry, next;
1219         vm_offset_t end;
1220         vm_offset_t align_mask;
1221
1222         if (start < map->min_offset)
1223                 start = map->min_offset;
1224         if (start > map->max_offset)
1225                 return (1);
1226
1227         /*
1228          * If the alignment is not a power of 2 we will have to use
1229          * a mod/division; set align_mask to a special value.
1230          */
1231         if ((align | (align - 1)) + 1 != (align << 1))
1232                 align_mask = (vm_offset_t)-1;
1233         else
1234                 align_mask = align - 1;
1235
1236         /*
1237          * Look for the first possible address; if there's already something
1238          * at this address, we have to start after it.
1239          */
1240         if (start == map->min_offset) {
1241                 if ((entry = map->first_free) != &map->header)
1242                         start = entry->end;
1243         } else {
1244                 vm_map_entry_t tmp;
1245
1246                 if (vm_map_lookup_entry(map, start, &tmp))
1247                         start = tmp->end;
1248                 entry = tmp;
1249         }
1250
1251         /*
1252          * Look through the rest of the map, trying to fit a new region in the
1253          * gap between existing regions, or after the very last region.
1254          */
1255         for (;; start = (entry = next)->end) {
1256                 /*
1257                  * Adjust the proposed start by the requested alignment,
1258                  * be sure that we didn't wrap the address.
1259                  */
1260                 if (align_mask == (vm_offset_t)-1)
1261                         end = roundup(start, align);
1262                 else
1263                         end = (start + align_mask) & ~align_mask;
1264                 if (end < start)
1265                         return (1);
1266                 start = end;
1267                 /*
1268                  * Find the end of the proposed new region.  Be sure we didn't
1269                  * go beyond the end of the map, or wrap around the address.
1270                  * Then check to see if this is the last entry or if the 
1271                  * proposed end fits in the gap between this and the next
1272                  * entry.
1273                  */
1274                 end = start + length;
1275                 if (end > map->max_offset || end < start)
1276                         return (1);
1277                 next = entry->next;
1278
1279                 /*
1280                  * If the next entry's start address is beyond the desired
1281                  * end address we may have found a good entry.
1282                  *
1283                  * If the next entry is a stack mapping we do not map into
1284                  * the stack's reserved space.
1285                  *
1286                  * XXX continue to allow mapping into the stack's reserved
1287                  * space if doing a MAP_STACK mapping inside a MAP_STACK
1288                  * mapping, for backwards compatibility.  But the caller
1289                  * really should use MAP_STACK | MAP_TRYFIXED if they
1290                  * want to do that.
1291                  */
1292                 if (next == &map->header)
1293                         break;
1294                 if (next->start >= end) {
1295                         if ((next->eflags & MAP_ENTRY_STACK) == 0)
1296                                 break;
1297                         if (flags & MAP_STACK)
1298                                 break;
1299                         if (next->start - next->aux.avail_ssize >= end)
1300                                 break;
1301                 }
1302         }
1303         map->hint = entry;
1304
1305         /*
1306          * Grow the kernel_map if necessary.  pmap_growkernel() will panic
1307          * if it fails.  The kernel_map is locked and nothing can steal
1308          * our address space if pmap_growkernel() blocks.
1309          *
1310          * NOTE: This may be unconditionally called for kldload areas on
1311          *       x86_64 because these do not bump kernel_vm_end (which would
1312          *       fill 128G worth of page tables!).  Therefore we must not
1313          *       retry.
1314          */
1315         if (map == &kernel_map) {
1316                 vm_offset_t kstop;
1317
1318                 kstop = round_page(start + length);
1319                 if (kstop > kernel_vm_end)
1320                         pmap_growkernel(start, kstop);
1321         }
1322         *addr = start;
1323         return (0);
1324 }
1325
1326 /*
1327  * vm_map_find finds an unallocated region in the target address map with
1328  * the given length and allocates it.  The search is defined to be first-fit
1329  * from the specified address; the region found is returned in the same
1330  * parameter.
1331  *
1332  * If object is non-NULL, ref count must be bumped by caller
1333  * prior to making call to account for the new entry.
1334  *
1335  * No requirements.  This function will lock the map temporarily.
1336  */
1337 int
1338 vm_map_find(vm_map_t map, void *map_object, void *map_aux,
1339             vm_ooffset_t offset, vm_offset_t *addr,
1340             vm_size_t length, vm_size_t align,
1341             boolean_t fitit,
1342             vm_maptype_t maptype,
1343             vm_prot_t prot, vm_prot_t max,
1344             int cow)
1345 {
1346         vm_offset_t start;
1347         vm_object_t object;
1348         int result;
1349         int count;
1350
1351         if (maptype == VM_MAPTYPE_UKSMAP)
1352                 object = NULL;
1353         else
1354                 object = map_object;
1355
1356         start = *addr;
1357
1358         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1359         vm_map_lock(map);
1360         if (object)
1361                 vm_object_hold_shared(object);
1362         if (fitit) {
1363                 if (vm_map_findspace(map, start, length, align, 0, addr)) {
1364                         if (object)
1365                                 vm_object_drop(object);
1366                         vm_map_unlock(map);
1367                         vm_map_entry_release(count);
1368                         return (KERN_NO_SPACE);
1369                 }
1370                 start = *addr;
1371         }
1372         result = vm_map_insert(map, &count, map_object, map_aux,
1373                                offset, start, start + length,
1374                                maptype, prot, max, cow);
1375         if (object)
1376                 vm_object_drop(object);
1377         vm_map_unlock(map);
1378         vm_map_entry_release(count);
1379
1380         return (result);
1381 }
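
/*
 * Example (illustrative sketch only; 'map' and 'size' are placeholders):
 * asking vm_map_find() to pick an address (fitit=TRUE) for an anonymous
 * VM_MAPTYPE_NORMAL mapping with no backing object supplied.  If a
 * non-NULL object were passed, its ref count would have to be bumped by
 * the caller first.
 *
 *      vm_offset_t addr = 0;
 *      int rv;
 *
 *      rv = vm_map_find(map, NULL, NULL, 0, &addr, size, PAGE_SIZE,
 *                       TRUE, VM_MAPTYPE_NORMAL,
 *                       VM_PROT_ALL, VM_PROT_ALL, 0);
 *      if (rv != KERN_SUCCESS)
 *              ... handle failure ...
 */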
1382
1383 /*
1384  * Simplify the given map entry by merging with either neighbor.  This
1385  * routine also has the ability to merge with both neighbors.
1386  *
1387  * This routine guarantees that the passed entry remains valid (though
1388  * possibly extended).  When merging, this routine may delete one or
1389  * both neighbors.  No action is taken on entries which have their
1390  * in-transition flag set.
1391  *
1392  * The map must be exclusively locked.
1393  */
1394 void
1395 vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry, int *countp)
1396 {
1397         vm_map_entry_t next, prev;
1398         vm_size_t prevsize, esize;
1399
1400         if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
1401                 ++mycpu->gd_cnt.v_intrans_coll;
1402                 return;
1403         }
1404
1405         if (entry->maptype == VM_MAPTYPE_SUBMAP)
1406                 return;
1407         if (entry->maptype == VM_MAPTYPE_UKSMAP)
1408                 return;
1409
1410         prev = entry->prev;
1411         if (prev != &map->header) {
1412                 prevsize = prev->end - prev->start;
1413                 if ( (prev->end == entry->start) &&
1414                      (prev->maptype == entry->maptype) &&
1415                      (prev->object.vm_object == entry->object.vm_object) &&
1416                      (!prev->object.vm_object ||
1417                         (prev->offset + prevsize == entry->offset)) &&
1418                      (prev->eflags == entry->eflags) &&
1419                      (prev->protection == entry->protection) &&
1420                      (prev->max_protection == entry->max_protection) &&
1421                      (prev->inheritance == entry->inheritance) &&
1422                      (prev->wired_count == entry->wired_count)) {
1423                         if (map->first_free == prev)
1424                                 map->first_free = entry;
1425                         if (map->hint == prev)
1426                                 map->hint = entry;
1427                         vm_map_entry_unlink(map, prev);
1428                         entry->start = prev->start;
1429                         entry->offset = prev->offset;
1430                         if (prev->object.vm_object)
1431                                 vm_object_deallocate(prev->object.vm_object);
1432                         vm_map_entry_dispose(map, prev, countp);
1433                 }
1434         }
1435
1436         next = entry->next;
1437         if (next != &map->header) {
1438                 esize = entry->end - entry->start;
1439                 if ((entry->end == next->start) &&
1440                     (next->maptype == entry->maptype) &&
1441                     (next->object.vm_object == entry->object.vm_object) &&
1442                      (!entry->object.vm_object ||
1443                         (entry->offset + esize == next->offset)) &&
1444                     (next->eflags == entry->eflags) &&
1445                     (next->protection == entry->protection) &&
1446                     (next->max_protection == entry->max_protection) &&
1447                     (next->inheritance == entry->inheritance) &&
1448                     (next->wired_count == entry->wired_count)) {
1449                         if (map->first_free == next)
1450                                 map->first_free = entry;
1451                         if (map->hint == next)
1452                                 map->hint = entry;
1453                         vm_map_entry_unlink(map, next);
1454                         entry->end = next->end;
1455                         if (next->object.vm_object)
1456                                 vm_object_deallocate(next->object.vm_object);
1457                         vm_map_entry_dispose(map, next, countp);
1458                 }
1459         }
1460 }
1461
1462 /*
1463  * Asserts that the given entry begins at or after the specified address.
1464  * If necessary, it splits the entry into two.
1465  */
1466 #define vm_map_clip_start(map, entry, startaddr, countp)                \
1467 {                                                                       \
1468         if (startaddr > entry->start)                                   \
1469                 _vm_map_clip_start(map, entry, startaddr, countp);      \
1470 }
1471
1472 /*
1473  * This routine is called only when it is known that the entry must be split.
1474  *
1475  * The map must be exclusively locked.
1476  */
1477 static void
1478 _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start,
1479                    int *countp)
1480 {
1481         vm_map_entry_t new_entry;
1482
1483         /*
1484          * Split off the front portion -- note that we must insert the new
1485          * entry BEFORE this one, so that this entry has the specified
1486          * starting address.
1487          */
1488
1489         vm_map_simplify_entry(map, entry, countp);
1490
1491         /*
1492          * If there is no object backing this entry, we might as well create
1493          * one now.  If we defer it, an object can get created after the map
1494          * is clipped, and individual objects will be created for the split-up
1495          * map.  This is a bit of a hack, but is also about the best place to
1496          * put this improvement.
1497          */
1498         if (entry->object.vm_object == NULL && !map->system_map) {
1499                 vm_map_entry_allocate_object(entry);
1500         }
1501
1502         new_entry = vm_map_entry_create(map, countp);
1503         *new_entry = *entry;
1504
1505         new_entry->end = start;
1506         entry->offset += (start - entry->start);
1507         entry->start = start;
1508
1509         vm_map_entry_link(map, entry->prev, new_entry);
1510
1511         switch(entry->maptype) {
1512         case VM_MAPTYPE_NORMAL:
1513         case VM_MAPTYPE_VPAGETABLE:
1514                 if (new_entry->object.vm_object) {
1515                         vm_object_hold(new_entry->object.vm_object);
1516                         vm_object_chain_wait(new_entry->object.vm_object, 0);
1517                         vm_object_reference_locked(new_entry->object.vm_object);
1518                         vm_object_drop(new_entry->object.vm_object);
1519                 }
1520                 break;
1521         default:
1522                 break;
1523         }
1524 }
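
/*
 * Illustrative sketch only (addresses hypothetical): given an entry
 * covering [0x1000, 0x5000) with offset 0, a call such as
 *
 *	_vm_map_clip_start(map, entry, 0x3000, &count);
 *
 * links a new entry [0x1000, 0x3000) with offset 0 BEFORE the original,
 * while the original entry is adjusted to [0x3000, 0x5000) with offset
 * 0x2000.  Both entries reference the same backing object, and an extra
 * object reference is taken for normal and vpagetable mappings.
 */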
1525
1526 /*
1527  * Asserts that the given entry ends at or before the specified address.
1528  * If necessary, it splits the entry into two.
1529  *
1530  * The map must be exclusively locked.
1531  */
1532 #define vm_map_clip_end(map, entry, endaddr, countp)            \
1533 {                                                               \
1534         if (endaddr < entry->end)                               \
1535                 _vm_map_clip_end(map, entry, endaddr, countp);  \
1536 }
1537
1538 /*
1539  * This routine is called only when it is known that the entry must be split.
1540  *
1541  * The map must be exclusively locked.
1542  */
1543 static void
1544 _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end,
1545                  int *countp)
1546 {
1547         vm_map_entry_t new_entry;
1548
1549         /*
1550          * If there is no object backing this entry, we might as well create
1551          * one now.  If we defer it, an object can get created after the map
1552          * is clipped, and individual objects will be created for the split-up
1553          * map.  This is a bit of a hack, but is also about the best place to
1554          * put this improvement.
1555          */
1556
1557         if (entry->object.vm_object == NULL && !map->system_map) {
1558                 vm_map_entry_allocate_object(entry);
1559         }
1560
1561         /*
1562          * Create a new entry and insert it AFTER the specified entry
1563          */
1564
1565         new_entry = vm_map_entry_create(map, countp);
1566         *new_entry = *entry;
1567
1568         new_entry->start = entry->end = end;
1569         new_entry->offset += (end - entry->start);
1570
1571         vm_map_entry_link(map, entry, new_entry);
1572
1573         switch(entry->maptype) {
1574         case VM_MAPTYPE_NORMAL:
1575         case VM_MAPTYPE_VPAGETABLE:
1576                 if (new_entry->object.vm_object) {
1577                         vm_object_hold(new_entry->object.vm_object);
1578                         vm_object_chain_wait(new_entry->object.vm_object, 0);
1579                         vm_object_reference_locked(new_entry->object.vm_object);
1580                         vm_object_drop(new_entry->object.vm_object);
1581                 }
1582                 break;
1583         default:
1584                 break;
1585         }
1586 }
1587
1588 /*
1589  * Clamps the starting and ending region addresses so that they fall
1590  * within the valid range for the map.
1591  */
1592 #define VM_MAP_RANGE_CHECK(map, start, end)     \
1593 {                                               \
1594         if (start < vm_map_min(map))            \
1595                 start = vm_map_min(map);        \
1596         if (end > vm_map_max(map))              \
1597                 end = vm_map_max(map);          \
1598         if (start > end)                        \
1599                 start = end;                    \
1600 }
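
/*
 * Worked example (hypothetical numbers): for a map whose valid range is
 * [vm_map_min(map), vm_map_max(map)) = [0x1000, 0x9000),
 *
 *	start = 0x0800; end = 0xa000;
 *	VM_MAP_RANGE_CHECK(map, start, end);
 *
 * clamps the pair to start = 0x1000, end = 0x9000.  A request lying
 * entirely below the map has start clamped up past end and then set
 * equal to end, producing an empty range rather than an error.
 */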
1601
1602 /*
1603  * Used to block when an in-transition collision occurs.  The map
1604  * is unlocked for the sleep and relocked before the return.
1605  */
1606 void
1607 vm_map_transition_wait(vm_map_t map)
1608 {
1609         tsleep_interlock(map, 0);
1610         vm_map_unlock(map);
1611         tsleep(map, PINTERLOCKED, "vment", 0);
1612         vm_map_lock(map);
1613 }
1614
1615 /*
1616  * When we do blocking operations with the map lock held it is
1617  * possible that a clip might have occurred on our in-transit entry,
1618  * requiring an adjustment to the entry in our loop.  These macros
1619  * help the pageable and clip_range code deal with the case.  The
1620  * conditional costs virtually nothing if no clipping has occurred.
1621  */
1622
1623 #define CLIP_CHECK_BACK(entry, save_start)              \
1624     do {                                                \
1625             while (entry->start != save_start) {        \
1626                     entry = entry->prev;                \
1627                     KASSERT(entry != &map->header, ("bad entry clip")); \
1628             }                                           \
1629     } while(0)
1630
1631 #define CLIP_CHECK_FWD(entry, save_end)                 \
1632     do {                                                \
1633             while (entry->end != save_end) {            \
1634                     entry = entry->next;                \
1635                     KASSERT(entry != &map->header, ("bad entry clip")); \
1636             }                                           \
1637     } while(0)
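
/*
 * Typical use (sketch only, mirroring vm_map_clip_range() below): record
 * the boundary before a potentially blocking call, then re-locate the
 * surviving fragment afterwards.
 *
 *	save_end = entry->end;
 *	vm_map_transition_wait(map);	(map lock dropped and reacquired)
 *	CLIP_CHECK_FWD(entry, save_end);
 *	CLIP_CHECK_BACK(start_entry, start);
 */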
1638
1639
1640 /*
1641  * Clip the specified range and return the base entry.  The
1642  * range may cover several entries starting at the returned base
1643  * and the first and last entry in the covering sequence will be
1644  * properly clipped to the requested start and end address.
1645  *
1646  * If no holes are allowed you should pass the MAP_CLIP_NO_HOLES
1647  * flag.
1648  *
1649  * The MAP_ENTRY_IN_TRANSITION flag will be set for the entries
1650  * covered by the requested range.
1651  *
1652  * The map must be exclusively locked on entry and will remain locked
1653  * on return. If no range exists or the range contains holes and you
1654  * specified that no holes were allowed, NULL will be returned.  This
1655  * routine may temporarily unlock the map in order to avoid a deadlock when
1656  * sleeping.
1657  */
1658 static
1659 vm_map_entry_t
1660 vm_map_clip_range(vm_map_t map, vm_offset_t start, vm_offset_t end, 
1661                   int *countp, int flags)
1662 {
1663         vm_map_entry_t start_entry;
1664         vm_map_entry_t entry;
1665
1666         /*
1667          * Locate the entry and effect initial clipping.  The in-transition
1668          * case does not occur very often so do not try to optimize it.
1669          */
1670 again:
1671         if (vm_map_lookup_entry(map, start, &start_entry) == FALSE)
1672                 return (NULL);
1673         entry = start_entry;
1674         if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
1675                 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
1676                 ++mycpu->gd_cnt.v_intrans_coll;
1677                 ++mycpu->gd_cnt.v_intrans_wait;
1678                 vm_map_transition_wait(map);
1679                 /*
1680                  * entry and/or start_entry may have been clipped while
1681                  * we slept, or may have gone away entirely.  We have
1682                  * to restart from the lookup.
1683                  */
1684                 goto again;
1685         }
1686
1687         /*
1688          * Since we hold an exclusive map lock we do not have to restart
1689          * after clipping, even though clipping may block in zalloc.
1690          */
1691         vm_map_clip_start(map, entry, start, countp);
1692         vm_map_clip_end(map, entry, end, countp);
1693         entry->eflags |= MAP_ENTRY_IN_TRANSITION;
1694
1695         /*
1696          * Scan entries covered by the range.  When working on the next
1697          * entry a restart need only re-loop on the current entry which
1698          * we have already locked, since 'next' may have changed.  Also,
1699          * even though entry is safe, it may have been clipped so we
1700          * have to iterate forwards through the clip after sleeping.
1701          */
1702         while (entry->next != &map->header && entry->next->start < end) {
1703                 vm_map_entry_t next = entry->next;
1704
1705                 if (flags & MAP_CLIP_NO_HOLES) {
1706                         if (next->start > entry->end) {
1707                                 vm_map_unclip_range(map, start_entry,
1708                                         start, entry->end, countp, flags);
1709                                 return(NULL);
1710                         }
1711                 }
1712
1713                 if (next->eflags & MAP_ENTRY_IN_TRANSITION) {
1714                         vm_offset_t save_end = entry->end;
1715                         next->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
1716                         ++mycpu->gd_cnt.v_intrans_coll;
1717                         ++mycpu->gd_cnt.v_intrans_wait;
1718                         vm_map_transition_wait(map);
1719
1720                         /*
1721                          * clips might have occurred while we blocked.
1722                          */
1723                         CLIP_CHECK_FWD(entry, save_end);
1724                         CLIP_CHECK_BACK(start_entry, start);
1725                         continue;
1726                 }
1727                 /*
1728                  * No restart necessary even though clip_end may block, we
1729                  * are holding the map lock.
1730                  */
1731                 vm_map_clip_end(map, next, end, countp);
1732                 next->eflags |= MAP_ENTRY_IN_TRANSITION;
1733                 entry = next;
1734         }
1735         if (flags & MAP_CLIP_NO_HOLES) {
1736                 if (entry->end != end) {
1737                         vm_map_unclip_range(map, start_entry,
1738                                 start, entry->end, countp, flags);
1739                         return(NULL);
1740                 }
1741         }
1742         return(start_entry);
1743 }
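
/*
 * Usage sketch only (parameters hypothetical), mirroring vm_map_unwire()
 * and vm_map_wire() below: callers bracket a ranged operation with
 * clip/unclip while holding the map exclusively.
 *
 *	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
 *	vm_map_lock(map);
 *	entry = vm_map_clip_range(map, start, end, &count, MAP_CLIP_NO_HOLES);
 *	if (entry != NULL) {
 *		(operate on the in-transition entries)
 *		vm_map_unclip_range(map, entry, start, end, &count,
 *				    MAP_CLIP_NO_HOLES);
 *	}
 *	vm_map_unlock(map);
 *	vm_map_entry_release(count);
 */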
1744
1745 /*
1746  * Undo the effect of vm_map_clip_range().  You should pass the same
1747  * flags and the same range that you passed to vm_map_clip_range().
1748  * This code will clear the in-transition flag on the entries and
1749  * wake up anyone waiting.  This code will also simplify the sequence
1750  * and attempt to merge it with entries before and after the sequence.
1751  *
1752  * The map must be locked on entry and will remain locked on return.
1753  *
1754  * Note that you should also pass the start_entry returned by
1755  * vm_map_clip_range().  However, if you block between the two calls
1756  * with the map unlocked please be aware that the start_entry may
1757  * have been clipped and you may need to scan it backwards to find
1758  * the entry corresponding with the original start address.  You are
1759  * responsible for this, vm_map_unclip_range() expects the correct
1760  * start_entry to be passed to it and will KASSERT otherwise.
1761  */
1762 static
1763 void
1764 vm_map_unclip_range(vm_map_t map, vm_map_entry_t start_entry,
1765                     vm_offset_t start, vm_offset_t end,
1766                     int *countp, int flags)
1767 {
1768         vm_map_entry_t entry;
1769
1770         entry = start_entry;
1771
1772         KASSERT(entry->start == start, ("unclip_range: illegal base entry"));
1773         while (entry != &map->header && entry->start < end) {
1774                 KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION,
1775                         ("in-transition flag not set during unclip on: %p",
1776                         entry));
1777                 KASSERT(entry->end <= end,
1778                         ("unclip_range: tail wasn't clipped"));
1779                 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
1780                 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
1781                         entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
1782                         wakeup(map);
1783                 }
1784                 entry = entry->next;
1785         }
1786
1787         /*
1788          * Simplification does not block so there is no restart case.
1789          */
1790         entry = start_entry;
1791         while (entry != &map->header && entry->start < end) {
1792                 vm_map_simplify_entry(map, entry, countp);
1793                 entry = entry->next;
1794         }
1795 }
1796
1797 /*
1798  * Mark the given range as handled by a subordinate map.
1799  *
1800  * This range must have been created with vm_map_find(), and no other
1801  * operations may have been performed on this range prior to calling
1802  * vm_map_submap().
1803  *
1804  * Submappings cannot be removed.
1805  *
1806  * No requirements.
1807  */
1808 int
1809 vm_map_submap(vm_map_t map, vm_offset_t start, vm_offset_t end, vm_map_t submap)
1810 {
1811         vm_map_entry_t entry;
1812         int result = KERN_INVALID_ARGUMENT;
1813         int count;
1814
1815         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1816         vm_map_lock(map);
1817
1818         VM_MAP_RANGE_CHECK(map, start, end);
1819
1820         if (vm_map_lookup_entry(map, start, &entry)) {
1821                 vm_map_clip_start(map, entry, start, &count);
1822         } else {
1823                 entry = entry->next;
1824         }
1825
1826         vm_map_clip_end(map, entry, end, &count);
1827
1828         if ((entry->start == start) && (entry->end == end) &&
1829             ((entry->eflags & MAP_ENTRY_COW) == 0) &&
1830             (entry->object.vm_object == NULL)) {
1831                 entry->object.sub_map = submap;
1832                 entry->maptype = VM_MAPTYPE_SUBMAP;
1833                 result = KERN_SUCCESS;
1834         }
1835         vm_map_unlock(map);
1836         vm_map_entry_release(count);
1837
1838         return (result);
1839 }
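
/*
 * Usage sketch only (addresses hypothetical): a range reserved earlier
 * with vm_map_find() can be handed over to a subordinate map like so.
 *
 *	if (vm_map_submap(map, base, base + size, submap) != KERN_SUCCESS)
 *		panic("vm_map_submap failed");
 *
 * The call succeeds only when the range matches a single entry exactly,
 * the entry is not copy-on-write, and it has no backing VM object.
 */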
1840
1841 /*
1842  * Sets the protection of the specified address region in the target map. 
1843  * If "set_max" is specified, the maximum protection is to be set;
1844  * otherwise, only the current protection is affected.
1845  *
1846  * The protection is not applicable to submaps, but is applicable to normal
1847  * maps and maps governed by virtual page tables.  For example, when operating
1848  * on a virtual page table our protection basically controls how COW occurs
1849  * on the backing object, whereas the virtual page table itself is an
1850  * abstraction maintained for userland.
1851  *
1852  * No requirements.
1853  */
1854 int
1855 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
1856                vm_prot_t new_prot, boolean_t set_max)
1857 {
1858         vm_map_entry_t current;
1859         vm_map_entry_t entry;
1860         int count;
1861
1862         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1863         vm_map_lock(map);
1864
1865         VM_MAP_RANGE_CHECK(map, start, end);
1866
1867         if (vm_map_lookup_entry(map, start, &entry)) {
1868                 vm_map_clip_start(map, entry, start, &count);
1869         } else {
1870                 entry = entry->next;
1871         }
1872
1873         /*
1874          * Make a first pass to check for protection violations.
1875          */
1876         current = entry;
1877         while ((current != &map->header) && (current->start < end)) {
1878                 if (current->maptype == VM_MAPTYPE_SUBMAP) {
1879                         vm_map_unlock(map);
1880                         vm_map_entry_release(count);
1881                         return (KERN_INVALID_ARGUMENT);
1882                 }
1883                 if ((new_prot & current->max_protection) != new_prot) {
1884                         vm_map_unlock(map);
1885                         vm_map_entry_release(count);
1886                         return (KERN_PROTECTION_FAILURE);
1887                 }
1888                 current = current->next;
1889         }
1890
1891         /*
1892          * Go back and fix up protections. [Note that clipping is not
1893          * necessary the second time.]
1894          */
1895         current = entry;
1896
1897         while ((current != &map->header) && (current->start < end)) {
1898                 vm_prot_t old_prot;
1899
1900                 vm_map_clip_end(map, current, end, &count);
1901
1902                 old_prot = current->protection;
1903                 if (set_max) {
1904                         current->protection =
1905                             (current->max_protection = new_prot) &
1906                             old_prot;
1907                 } else {
1908                         current->protection = new_prot;
1909                 }
1910
1911                 /*
1912                  * Update physical map if necessary. Worry about copy-on-write
1913                  * here -- CHECK THIS XXX
1914                  */
1915
1916                 if (current->protection != old_prot) {
1917 #define MASK(entry)     (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
1918                                                         VM_PROT_ALL)
1919
1920                         pmap_protect(map->pmap, current->start,
1921                             current->end,
1922                             current->protection & MASK(current));
1923 #undef  MASK
1924                 }
1925
1926                 vm_map_simplify_entry(map, current, &count);
1927
1928                 current = current->next;
1929         }
1930
1931         vm_map_unlock(map);
1932         vm_map_entry_release(count);
1933         return (KERN_SUCCESS);
1934 }
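
/*
 * Usage sketch only: revoke write access on a range while leaving the
 * maximum protection untouched.
 *
 *	rv = vm_map_protect(map, start, end, VM_PROT_READ, FALSE);
 *
 * KERN_PROTECTION_FAILURE is returned if the new protection exceeds the
 * maximum protection of any entry in the range, and KERN_INVALID_ARGUMENT
 * if the range covers a submap.
 */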
1935
1936 /*
1937  * This routine traverses a processes map handling the madvise
1938  * This routine traverses a process's map, handling the madvise
1939  * system call.  Advisories are classified as either those affecting
1940  * the vm_map_entry structure, or those affecting the underlying
1941  *
1942  * The <value> argument is used for extended madvise calls.
1943  *
1944  * No requirements.
1945  */
1946 int
1947 vm_map_madvise(vm_map_t map, vm_offset_t start, vm_offset_t end,
1948                int behav, off_t value)
1949 {
1950         vm_map_entry_t current, entry;
1951         int modify_map = 0;
1952         int error = 0;
1953         int count;
1954
1955         /*
1956          * Some madvise calls directly modify the vm_map_entry, in which case
1957          * we need to use an exclusive lock on the map and we need to perform 
1958          * various clipping operations.  Otherwise we only need a read-lock
1959          * on the map.
1960          */
1961
1962         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1963
1964         switch(behav) {
1965         case MADV_NORMAL:
1966         case MADV_SEQUENTIAL:
1967         case MADV_RANDOM:
1968         case MADV_NOSYNC:
1969         case MADV_AUTOSYNC:
1970         case MADV_NOCORE:
1971         case MADV_CORE:
1972         case MADV_SETMAP:
1973         case MADV_INVAL:
1974                 modify_map = 1;
1975                 vm_map_lock(map);
1976                 break;
1977         case MADV_WILLNEED:
1978         case MADV_DONTNEED:
1979         case MADV_FREE:
1980                 vm_map_lock_read(map);
1981                 break;
1982         default:
1983                 vm_map_entry_release(count);
1984                 return (EINVAL);
1985         }
1986
1987         /*
1988          * Locate starting entry and clip if necessary.
1989          */
1990
1991         VM_MAP_RANGE_CHECK(map, start, end);
1992
1993         if (vm_map_lookup_entry(map, start, &entry)) {
1994                 if (modify_map)
1995                         vm_map_clip_start(map, entry, start, &count);
1996         } else {
1997                 entry = entry->next;
1998         }
1999
2000         if (modify_map) {
2001                 /*
2002                  * madvise behaviors that are implemented in the vm_map_entry.
2003                  *
2004                  * We clip the vm_map_entry so that behavioral changes are
2005                  * limited to the specified address range.
2006                  */
2007                 for (current = entry;
2008                      (current != &map->header) && (current->start < end);
2009                      current = current->next
2010                 ) {
2011                         if (current->maptype == VM_MAPTYPE_SUBMAP)
2012                                 continue;
2013
2014                         vm_map_clip_end(map, current, end, &count);
2015
2016                         switch (behav) {
2017                         case MADV_NORMAL:
2018                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
2019                                 break;
2020                         case MADV_SEQUENTIAL:
2021                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
2022                                 break;
2023                         case MADV_RANDOM:
2024                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
2025                                 break;
2026                         case MADV_NOSYNC:
2027                                 current->eflags |= MAP_ENTRY_NOSYNC;
2028                                 break;
2029                         case MADV_AUTOSYNC:
2030                                 current->eflags &= ~MAP_ENTRY_NOSYNC;
2031                                 break;
2032                         case MADV_NOCORE:
2033                                 current->eflags |= MAP_ENTRY_NOCOREDUMP;
2034                                 break;
2035                         case MADV_CORE:
2036                                 current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
2037                                 break;
2038                         case MADV_INVAL:
2039                                 /*
2040                                  * Invalidate the related pmap entries, used
2041                                  * to flush portions of the real kernel's
2042                                  * pmap when the caller has removed or
2043                                  * modified existing mappings in a virtual
2044                                  * page table.
2045                                  */
2046                                 pmap_remove(map->pmap,
2047                                             current->start, current->end);
2048                                 break;
2049                         case MADV_SETMAP:
2050                                 /*
2051                                  * Set the page directory page for a map
2052                                  * governed by a virtual page table.  Mark
2053                                  * the entry as being governed by a virtual
2054                                  * page table if it is not.
2055                                  *
2056                                  * XXX the page directory page is stored
2057                                  * in the avail_ssize field of the map_entry.
2058                                  *
2059                                  * XXX the map simplification code does not
2060                                  * compare this field so weird things may
2061                                  * happen if you do not apply this function
2062                                  * to the entire mapping governed by the
2063                                  * virtual page table.
2064                                  */
2065                                 if (current->maptype != VM_MAPTYPE_VPAGETABLE) {
2066                                         error = EINVAL;
2067                                         break;
2068                                 }
2069                                 current->aux.master_pde = value;
2070                                 pmap_remove(map->pmap,
2071                                             current->start, current->end);
2072                                 break;
2073                         default:
2074                                 error = EINVAL;
2075                                 break;
2076                         }
2077                         vm_map_simplify_entry(map, current, &count);
2078                 }
2079                 vm_map_unlock(map);
2080         } else {
2081                 vm_pindex_t pindex;
2082                 int count;
2083
2084                 /*
2085                  * madvise behaviors that are implemented in the underlying
2086                  * vm_object.
2087                  *
2088                  * Since we don't clip the vm_map_entry, we have to clip
2089                  * the vm_object pindex and count.
2090                  *
2091                  * NOTE!  We currently do not support these functions on
2092                  * virtual page tables.
2093                  */
2094                 for (current = entry;
2095                      (current != &map->header) && (current->start < end);
2096                      current = current->next
2097                 ) {
2098                         vm_offset_t useStart;
2099
2100                         if (current->maptype != VM_MAPTYPE_NORMAL)
2101                                 continue;
2102
2103                         pindex = OFF_TO_IDX(current->offset);
2104                         count = atop(current->end - current->start);
2105                         useStart = current->start;
2106
2107                         if (current->start < start) {
2108                                 pindex += atop(start - current->start);
2109                                 count -= atop(start - current->start);
2110                                 useStart = start;
2111                         }
2112                         if (current->end > end)
2113                                 count -= atop(current->end - end);
2114
2115                         if (count <= 0)
2116                                 continue;
2117
2118                         vm_object_madvise(current->object.vm_object,
2119                                           pindex, count, behav);
2120
2121                         /*
2122                          * Try to populate the page table.  Mappings governed
2123                          * by virtual page tables cannot be pre-populated
2124                          * without a lot of work so don't try.
2125                          */
2126                         if (behav == MADV_WILLNEED &&
2127                             current->maptype != VM_MAPTYPE_VPAGETABLE) {
2128                                 pmap_object_init_pt(
2129                                     map->pmap, 
2130                                     useStart,
2131                                     current->protection,
2132                                     current->object.vm_object,
2133                                     pindex, 
2134                                     (count << PAGE_SHIFT),
2135                                     MAP_PREFAULT_MADVISE
2136                                 );
2137                         }
2138                 }
2139                 vm_map_unlock_read(map);
2140         }
2141         vm_map_entry_release(count);
2142         return(error);
2143 }       
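
/*
 * Usage sketch only: advise the VM system that a range will be needed
 * soon so that its pages are pre-faulted where possible.
 *
 *	error = vm_map_madvise(map, start, end, MADV_WILLNEED, 0);
 *
 * The value argument is only interpreted by extended advisories such as
 * MADV_SETMAP, where it supplies the page directory page for a mapping
 * governed by a virtual page table.
 */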
2144
2145
2146 /*
2147  * Sets the inheritance of the specified address range in the target map.
2148  * Inheritance affects how the map will be shared with child maps at the
2149  * time of vm_map_fork.
2150  */
2151 int
2152 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
2153                vm_inherit_t new_inheritance)
2154 {
2155         vm_map_entry_t entry;
2156         vm_map_entry_t temp_entry;
2157         int count;
2158
2159         switch (new_inheritance) {
2160         case VM_INHERIT_NONE:
2161         case VM_INHERIT_COPY:
2162         case VM_INHERIT_SHARE:
2163                 break;
2164         default:
2165                 return (KERN_INVALID_ARGUMENT);
2166         }
2167
2168         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
2169         vm_map_lock(map);
2170
2171         VM_MAP_RANGE_CHECK(map, start, end);
2172
2173         if (vm_map_lookup_entry(map, start, &temp_entry)) {
2174                 entry = temp_entry;
2175                 vm_map_clip_start(map, entry, start, &count);
2176         } else
2177                 entry = temp_entry->next;
2178
2179         while ((entry != &map->header) && (entry->start < end)) {
2180                 vm_map_clip_end(map, entry, end, &count);
2181
2182                 entry->inheritance = new_inheritance;
2183
2184                 vm_map_simplify_entry(map, entry, &count);
2185
2186                 entry = entry->next;
2187         }
2188         vm_map_unlock(map);
2189         vm_map_entry_release(count);
2190         return (KERN_SUCCESS);
2191 }
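
/*
 * Usage sketch only: arrange for a region to be shared with, rather than
 * copied into, child maps created by a later fork.
 *
 *	rv = vm_map_inherit(map, start, end, VM_INHERIT_SHARE);
 *
 * Only VM_INHERIT_NONE, VM_INHERIT_COPY and VM_INHERIT_SHARE are
 * accepted; anything else returns KERN_INVALID_ARGUMENT.
 */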
2192
2193 /*
2194  * Implement the semantics of mlock
2195  */
2196 int
2197 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t real_end,
2198               boolean_t new_pageable)
2199 {
2200         vm_map_entry_t entry;
2201         vm_map_entry_t start_entry;
2202         vm_offset_t end;
2203         int rv = KERN_SUCCESS;
2204         int count;
2205
2206         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
2207         vm_map_lock(map);
2208         VM_MAP_RANGE_CHECK(map, start, real_end);
2209         end = real_end;
2210
2211         start_entry = vm_map_clip_range(map, start, end, &count,
2212                                         MAP_CLIP_NO_HOLES);
2213         if (start_entry == NULL) {
2214                 vm_map_unlock(map);
2215                 vm_map_entry_release(count);
2216                 return (KERN_INVALID_ADDRESS);
2217         }
2218
2219         if (new_pageable == 0) {
2220                 entry = start_entry;
2221                 while ((entry != &map->header) && (entry->start < end)) {
2222                         vm_offset_t save_start;
2223                         vm_offset_t save_end;
2224
2225                         /*
2226                          * Already user wired or hard wired (trivial cases)
2227                          */
2228                         if (entry->eflags & MAP_ENTRY_USER_WIRED) {
2229                                 entry = entry->next;
2230                                 continue;
2231                         }
2232                         if (entry->wired_count != 0) {
2233                                 entry->wired_count++;
2234                                 entry->eflags |= MAP_ENTRY_USER_WIRED;
2235                                 entry = entry->next;
2236                                 continue;
2237                         }
2238
2239                         /*
2240                          * A new wiring requires instantiation of appropriate
2241                          * management structures and the faulting in of the
2242                          * page.
2243                          */
2244                         if (entry->maptype == VM_MAPTYPE_NORMAL ||
2245                             entry->maptype == VM_MAPTYPE_VPAGETABLE) {
2246                                 int copyflag = entry->eflags &
2247                                                MAP_ENTRY_NEEDS_COPY;
2248                                 if (copyflag && ((entry->protection &
2249                                                   VM_PROT_WRITE) != 0)) {
2250                                         vm_map_entry_shadow(entry, 0);
2251                                 } else if (entry->object.vm_object == NULL &&
2252                                            !map->system_map) {
2253                                         vm_map_entry_allocate_object(entry);
2254                                 }
2255                         }
2256                         entry->wired_count++;
2257                         entry->eflags |= MAP_ENTRY_USER_WIRED;
2258
2259                         /*
2260                          * Now fault in the area.  Note that vm_fault_wire()
2261                          * may release the map lock temporarily; it will be
2262                          * relocked on return.  The in-transition
2263                          * flag protects the entries. 
2264                          */
2265                         save_start = entry->start;
2266                         save_end = entry->end;
2267                         rv = vm_fault_wire(map, entry, TRUE, 0);
2268                         if (rv) {
2269                                 CLIP_CHECK_BACK(entry, save_start);
2270                                 for (;;) {
2271                                         KASSERT(entry->wired_count == 1, ("bad wired_count on entry"));
2272                                         entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2273                                         entry->wired_count = 0;
2274                                         if (entry->end == save_end)
2275                                                 break;
2276                                         entry = entry->next;
2277                                         KASSERT(entry != &map->header, ("bad entry clip during backout"));
2278                                 }
2279                                 end = save_start;       /* unwire the rest */
2280                                 break;
2281                         }
2282                         /*
2283                          * note that even though the entry might have been
2284                          * clipped, the USER_WIRED flag we set prevents
2285                          * duplication so we do not have to do a 
2286                          * clip check.
2287                          */
2288                         entry = entry->next;
2289                 }
2290
2291                 /*
2292                  * If we failed, fall through to the unwiring section to
2293                  * unwire what we had wired so far.  'end' has already
2294                  * been adjusted.
2295                  */
2296                 if (rv)
2297                         new_pageable = 1;
2298
2299                 /*
2300                  * start_entry might have been clipped if we unlocked the
2301                  * map and blocked.  No matter how clipped it has gotten
2302                  * there should be a fragment that is on our start boundary.
2303                  */
2304                 CLIP_CHECK_BACK(start_entry, start);
2305         }
2306
2307         /*
2308          * Deal with the unwiring case.
2309          */
2310         if (new_pageable) {
2311                 /*
2312                  * This is the unwiring case.  We must first ensure that the
2313                  * range to be unwired is really wired down.  We know there
2314                  * are no holes.
2315                  */
2316                 entry = start_entry;
2317                 while ((entry != &map->header) && (entry->start < end)) {
2318                         if ((entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
2319                                 rv = KERN_INVALID_ARGUMENT;
2320                                 goto done;
2321                         }
2322                         KASSERT(entry->wired_count != 0, ("wired count was 0 with USER_WIRED set! %p", entry));
2323                         entry = entry->next;
2324                 }
2325
2326                 /*
2327                  * Now decrement the wiring count for each region. If a region
2328                  * becomes completely unwired, unwire its physical pages and
2329                  * mappings.
2330                  */
2331                 /*
2332                  * The first pass above only verified that each entry in the
2333                  * range is user-wired with a non-zero wired count; this pass
2334                  * performs the actual unwiring.
2335                  *
2336                  * Note that "entry" must be reset to start_entry here.  If the
2337                  * iterator left over from the first pass were reused, the loop
2338                  * below would never be entered and the pages backing the
2339                  * entries would remain wired, leaking wired pages.
2340                  */
2341                 entry = start_entry;
2342                 while ((entry != &map->header) && (entry->start < end)) {
2343                         KASSERT(entry->eflags & MAP_ENTRY_USER_WIRED,
2344                                 ("expected USER_WIRED on entry %p", entry));
2345                         entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2346                         entry->wired_count--;
2347                         if (entry->wired_count == 0)
2348                                 vm_fault_unwire(map, entry);
2349                         entry = entry->next;
2350                 }
2351         }
2352 done:
2353         vm_map_unclip_range(map, start_entry, start, real_end, &count,
2354                 MAP_CLIP_NO_HOLES);
2355         map->timestamp++;
2356         vm_map_unlock(map);
2357         vm_map_entry_release(count);
2358         return (rv);
2359 }
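
/*
 * Usage sketch only: a caller implementing mlock()-style semantics wires
 * the range with new_pageable == FALSE and undoes it with TRUE.
 *
 *	rv = vm_map_unwire(map, addr, addr + len, FALSE);	(wire + fault in)
 *	...
 *	rv = vm_map_unwire(map, addr, addr + len, TRUE);	(unwire)
 */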
2360
2361 /*
2362  * Sets the pageability of the specified address range in the target map.
2363  * Regions specified as not pageable require locked-down physical
2364  * memory and physical page maps.
2365  *
2366  * The map must not be locked, but a reference must remain to the map
2367  * throughout the call.
2368  *
2369  * This function may be called via the zalloc path and must properly
2370  * reserve map entries for kernel_map.
2371  *
2372  * No requirements.
2373  */
2374 int
2375 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t real_end, int kmflags)
2376 {
2377         vm_map_entry_t entry;
2378         vm_map_entry_t start_entry;
2379         vm_offset_t end;
2380         int rv = KERN_SUCCESS;
2381         int count;
2382
2383         if (kmflags & KM_KRESERVE)
2384                 count = vm_map_entry_kreserve(MAP_RESERVE_COUNT);
2385         else
2386                 count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
2387         vm_map_lock(map);
2388         VM_MAP_RANGE_CHECK(map, start, real_end);
2389         end = real_end;
2390
2391         start_entry = vm_map_clip_range(map, start, end, &count,
2392                                         MAP_CLIP_NO_HOLES);
2393         if (start_entry == NULL) {
2394                 vm_map_unlock(map);
2395                 rv = KERN_INVALID_ADDRESS;
2396                 goto failure;
2397         }
2398         if ((kmflags & KM_PAGEABLE) == 0) {
2399                 /*
2400                  * Wiring.  
2401                  *
2402                  * 1.  Holding the write lock, we create any shadow or zero-fill
2403                  * objects that need to be created. Then we clip each map
2404                  * entry to the region to be wired and increment its wiring
2405                  * count.  We create objects before clipping the map entries
2406                  * to avoid object proliferation.
2407                  *
2408                  * 2.  We downgrade to a read lock, and call vm_fault_wire to
2409                  * fault in the pages for any newly wired area (wired_count is
2410                  * 1).
2411                  *
2412                  * Downgrading to a read lock for vm_fault_wire avoids a 
2413                  * possible deadlock with another process that may have faulted
2414                  * on one of the pages to be wired (it would mark the page busy,
2415                  * blocking us, then in turn block on the map lock that we
2416                  * hold).  Because of problems in the recursive lock package,
2417                  * we cannot upgrade to a write lock in vm_map_lookup.  Thus,
2418                  * any actions that require the write lock must be done
2419                  * beforehand.  Because we keep the read lock on the map, the
2420                  * copy-on-write status of the entries we modify here cannot
2421                  * change.
2422                  */
2423                 entry = start_entry;
2424                 while ((entry != &map->header) && (entry->start < end)) {
2425                         /*
2426                          * Trivial case if the entry is already wired
2427                          */
2428                         if (entry->wired_count) {
2429                                 entry->wired_count++;
2430                                 entry = entry->next;
2431                                 continue;
2432                         }
2433
2434                         /*
2435                          * The entry is being newly wired, we have to setup
2436                          * appropriate management structures.  A shadow 
2437                          * object is required for a copy-on-write region,
2438                          * or a normal object for a zero-fill region.  We
2439                          * do not have to do this for entries that point to sub
2440                          * maps because we won't hold the lock on the sub map.
2441                          */
2442                         if (entry->maptype == VM_MAPTYPE_NORMAL ||
2443                             entry->maptype == VM_MAPTYPE_VPAGETABLE) {
2444                                 int copyflag = entry->eflags &
2445                                                MAP_ENTRY_NEEDS_COPY;
2446                                 if (copyflag && ((entry->protection &
2447                                                   VM_PROT_WRITE) != 0)) {
2448                                         vm_map_entry_shadow(entry, 0);
2449                                 } else if (entry->object.vm_object == NULL &&
2450                                            !map->system_map) {
2451                                         vm_map_entry_allocate_object(entry);
2452                                 }
2453                         }
2454
2455                         entry->wired_count++;
2456                         entry = entry->next;
2457                 }
2458
2459                 /*
2460                  * Pass 2.
2461                  */
2462
2463                 /*
2464                  * HACK HACK HACK HACK
2465                  *
2466                  * vm_fault_wire() temporarily unlocks the map to avoid
2467                  * deadlocks.  The in-transition flag set by the earlier
2468                  * vm_map_clip_range() call should protect us from changes
2469                  * while the map is unlocked.
2470                  *
2471                  * NOTE: Previously this comment stated that clipping might
2472                  *       still occur while the entry is unlocked, but from
2473                  *       what I can tell it actually cannot.
2474                  *
2475                  *       It is unclear whether the CLIP_CHECK_*() calls
2476                  *       are still needed but we keep them in anyway.
2477                  *
2478                  * HACK HACK HACK HACK
2479                  */
2480
2481                 entry = start_entry;
2482                 while (entry != &map->header && entry->start < end) {
2483                         /*
2484                          * If vm_fault_wire fails for any page we need to undo
2485                          * what has been done.  We decrement the wiring count
2486                          * for those pages which have not yet been wired (now)
2487                          * and unwire those that have (later).
2488                          */
2489                         vm_offset_t save_start = entry->start;
2490                         vm_offset_t save_end = entry->end;
2491
2492                         if (entry->wired_count == 1)
2493                                 rv = vm_fault_wire(map, entry, FALSE, kmflags);
2494                         if (rv) {
2495                                 CLIP_CHECK_BACK(entry, save_start);
2496                                 for (;;) {
2497                                         KASSERT(entry->wired_count == 1, ("wired_count changed unexpectedly"));
2498                                         entry->wired_count = 0;
2499                                         if (entry->end == save_end)
2500                                                 break;
2501                                         entry = entry->next;
2502                                         KASSERT(entry != &map->header, ("bad entry clip during backout"));
2503                                 }
2504                                 end = save_start;
2505                                 break;
2506                         }
2507                         CLIP_CHECK_FWD(entry, save_end);
2508                         entry = entry->next;
2509                 }
2510
2511                 /*
2512                  * If a failure occurred, undo everything by falling through
2513                  * to the unwiring code.  'end' has already been adjusted
2514                  * appropriately.
2515                  */
2516                 if (rv)
2517                         kmflags |= KM_PAGEABLE;
2518
2519                 /*
2520                  * start_entry is still IN_TRANSITION but may have been 
2521                  * clipped since vm_fault_wire() unlocks and relocks the
2522                  * map.  No matter how clipped it has gotten there should
2523                  * be a fragment that is on our start boundary.
2524                  */
2525                 CLIP_CHECK_BACK(start_entry, start);
2526         }
2527
2528         if (kmflags & KM_PAGEABLE) {
2529                 /*
2530                  * This is the unwiring case.  We must first ensure that the
2531                  * range to be unwired is really wired down.  We know there
2532                  * are no holes.
2533                  */
2534                 entry = start_entry;
2535                 while ((entry != &map->header) && (entry->start < end)) {
2536                         if (entry->wired_count == 0) {
2537                                 rv = KERN_INVALID_ARGUMENT;
2538                                 goto done;
2539                         }
2540                         entry = entry->next;
2541                 }
2542
2543                 /*
2544                  * Now decrement the wiring count for each region. If a region
2545                  * becomes completely unwired, unwire its physical pages and
2546                  * mappings.
2547                  */
2548                 entry = start_entry;
2549                 while ((entry != &map->header) && (entry->start < end)) {
2550                         entry->wired_count--;
2551                         if (entry->wired_count == 0)
2552                                 vm_fault_unwire(map, entry);
2553                         entry = entry->next;
2554                 }
2555         }
2556 done:
2557         vm_map_unclip_range(map, start_entry, start, real_end,
2558                             &count, MAP_CLIP_NO_HOLES);
2559         map->timestamp++;
2560         vm_map_unlock(map);
2561 failure:
2562         if (kmflags & KM_KRESERVE)
2563                 vm_map_entry_krelease(count);
2564         else
2565                 vm_map_entry_release(count);
2566         return (rv);
2567 }
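
/*
 * Usage sketch only: kernel callers wire a newly allocated range and pass
 * KM_PAGEABLE to undo the wiring.
 *
 *	rv = vm_map_wire(map, addr, addr + size, 0);		(wire + fault in)
 *	...
 *	rv = vm_map_wire(map, addr, addr + size, KM_PAGEABLE);	(unwire)
 *
 * KM_KRESERVE may be or'd in when the caller runs in the zalloc path and
 * must draw map entries from the kernel reserve.
 */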
2568
2569 /*
2570  * Mark a newly allocated address range as wired but do not fault in
2571  * the pages.  The caller is expected to load the pages into the object.
2572  *
2573  * The map must be locked on entry and will remain locked on return.
2574  * No other requirements.
2575  */
2576 void
2577 vm_map_set_wired_quick(vm_map_t map, vm_offset_t addr, vm_size_t size,
2578                        int *countp)
2579 {
2580         vm_map_entry_t scan;
2581         vm_map_entry_t entry;
2582
2583         entry = vm_map_clip_range(map, addr, addr + size,
2584                                   countp, MAP_CLIP_NO_HOLES);
2585         for (scan = entry;
2586              scan != &map->header && scan->start < addr + size;
2587              scan = scan->next) {
2588             KKASSERT(scan->wired_count == 0);
2589             scan->wired_count = 1;
2590         }
2591         vm_map_unclip_range(map, entry, addr, addr + size,
2592                             countp, MAP_CLIP_NO_HOLES);
2593 }
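
/*
 * Usage sketch only: a caller which will load the pages itself marks the
 * fresh range wired without faulting anything in.
 *
 *	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
 *	vm_map_lock(map);
 *	vm_map_set_wired_quick(map, addr, size, &count);
 *	vm_map_unlock(map);
 *	vm_map_entry_release(count);
 */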
2594
2595 /*
2596  * Push any dirty cached pages in the address range to their pager.
2597  * If syncio is TRUE, dirty pages are written synchronously.
2598  * If invalidate is TRUE, any cached pages are freed as well.
2599  *
2600  * This routine is called by sys_msync()
2601  *
2602  * Returns an error if any part of the specified range is not mapped.
2603  *
2604  * No requirements.
2605  */
2606 int
2607 vm_map_clean(vm_map_t map, vm_offset_t start, vm_offset_t end,
2608              boolean_t syncio, boolean_t invalidate)
2609 {
2610         vm_map_entry_t current;
2611         vm_map_entry_t entry;
2612         vm_size_t size;
2613         vm_object_t object;
2614         vm_object_t tobj;
2615         vm_ooffset_t offset;
2616
2617         vm_map_lock_read(map);
2618         VM_MAP_RANGE_CHECK(map, start, end);
2619         if (!vm_map_lookup_entry(map, start, &entry)) {
2620                 vm_map_unlock_read(map);
2621                 return (KERN_INVALID_ADDRESS);
2622         }
2623         lwkt_gettoken(&map->token);
2624
2625         /*
2626          * Make a first pass to check for holes.
2627          */
2628         for (current = entry; current->start < end; current = current->next) {
2629                 if (current->maptype == VM_MAPTYPE_SUBMAP) {
2630                         lwkt_reltoken(&map->token);
2631                         vm_map_unlock_read(map);
2632                         return (KERN_INVALID_ARGUMENT);
2633                 }
2634                 if (end > current->end &&
2635                     (current->next == &map->header ||
2636                         current->end != current->next->start)) {
2637                         lwkt_reltoken(&map->token);
2638                         vm_map_unlock_read(map);
2639                         return (KERN_INVALID_ADDRESS);
2640                 }
2641         }
2642
2643         if (invalidate)
2644                 pmap_remove(vm_map_pmap(map), start, end);
2645
2646         /*
2647          * Make a second pass, cleaning/uncaching pages from the indicated
2648          * objects as we go.
2649          */
2650         for (current = entry; current->start < end; current = current->next) {
2651                 offset = current->offset + (start - current->start);
2652                 size = (end <= current->end ? end : current->end) - start;
2653
2654                 switch(current->maptype) {
2655                 case VM_MAPTYPE_SUBMAP:
2656                 {
2657                         vm_map_t smap;
2658                         vm_map_entry_t tentry;
2659                         vm_size_t tsize;
2660
2661                         smap = current->object.sub_map;
2662                         vm_map_lock_read(smap);
2663                         vm_map_lookup_entry(smap, offset, &tentry);
2664                         tsize = tentry->end - offset;
2665                         if (tsize < size)
2666                                 size = tsize;
2667                         object = tentry->object.vm_object;
2668                         offset = tentry->offset + (offset - tentry->start);
2669                         vm_map_unlock_read(smap);
2670                         break;
2671                 }
2672                 case VM_MAPTYPE_NORMAL:
2673                 case VM_MAPTYPE_VPAGETABLE:
2674                         object = current->object.vm_object;
2675                         break;
2676                 default:
2677                         object = NULL;
2678                         break;
2679                 }
2680
2681                 if (object)
2682                         vm_object_hold(object);
2683
2684                 /*
2685                  * Note that there is absolutely no sense in writing out
2686                  * anonymous objects, so we track down the vnode object
2687                  * to write out.
2688                  * We invalidate (remove) all pages from the address space
2689                  * anyway, for semantic correctness.
2690                  *
2691                  * note: certain anonymous maps, such as MAP_NOSYNC maps,
2692                  * may start out with a NULL object.
2693                  */
2694                 while (object && (tobj = object->backing_object) != NULL) {
2695                         vm_object_hold(tobj);
2696                         if (tobj == object->backing_object) {
2697                                 vm_object_lock_swap();
2698                                 offset += object->backing_object_offset;
2699                                 vm_object_drop(object);
2700                                 object = tobj;
2701                                 if (object->size < OFF_TO_IDX(offset + size))
2702                                         size = IDX_TO_OFF(object->size) -
2703                                                offset;
2704                                 break;
2705                         }
2706                         vm_object_drop(tobj);
2707                 }
2708                 if (object && (object->type == OBJT_VNODE) && 
2709                     (current->protection & VM_PROT_WRITE) &&
2710                     (object->flags & OBJ_NOMSYNC) == 0) {
2711                         /*
2712                          * Flush pages if writing is allowed, invalidate them
2713                          * if invalidation requested.  Pages undergoing I/O
2714                          * will be ignored by vm_object_page_remove().
2715                          *
2716                          * We cannot lock the vnode and then wait for paging
2717                          * to complete without deadlocking against vm_fault.
2718                          * Instead we simply call vm_object_page_remove() and
2719                          * allow it to block internally on a page-by-page 
2720                          * basis when it encounters pages undergoing async 
2721                          * I/O.
2722                          */
2723                         int flags;
2724
2725                         /* no chain wait needed for vnode objects */
2726                         vm_object_reference_locked(object);
2727                         vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY);
2728                         flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
2729                         flags |= invalidate ? OBJPC_INVAL : 0;
2730
2731                         /*
2732                          * When operating on a virtual page table just
2733                          * flush the whole object.  XXX we probably ought
2734                          * to 
2735                          */
2736                         switch(current->maptype) {
2737                         case VM_MAPTYPE_NORMAL:
2738                                 vm_object_page_clean(object,
2739                                     OFF_TO_IDX(offset),
2740                                     OFF_TO_IDX(offset + size + PAGE_MASK),
2741                                     flags);
2742                                 break;
2743                         case VM_MAPTYPE_VPAGETABLE:
2744                                 vm_object_page_clean(object, 0, 0, flags);
2745                                 break;
2746                         }
2747                         vn_unlock(((struct vnode *)object->handle));
2748                         vm_object_deallocate_locked(object);
2749                 }
2750                 if (object && invalidate &&
2751                    ((object->type == OBJT_VNODE) ||
2752                     (object->type == OBJT_DEVICE) ||
2753                     (object->type == OBJT_MGTDEVICE))) {
2754                         int clean_only = 
2755                                 ((object->type == OBJT_DEVICE) ||
2756                                 (object->type == OBJT_MGTDEVICE)) ? FALSE : TRUE;
2757                         /* no chain wait needed for vnode/device objects */
2758                         vm_object_reference_locked(object);
2759                         switch(current->maptype) {
2760                         case VM_MAPTYPE_NORMAL:
2761                                 vm_object_page_remove(object,
2762                                     OFF_TO_IDX(offset),
2763                                     OFF_TO_IDX(offset + size + PAGE_MASK),
2764                                     clean_only);
2765                                 break;
2766                         case VM_MAPTYPE_VPAGETABLE:
2767                                 vm_object_page_remove(object, 0, 0, clean_only);
2768                                 break;
2769                         }
2770                         vm_object_deallocate_locked(object);
2771                 }
2772                 start += size;
2773                 if (object)
2774                         vm_object_drop(object);
2775         }
2776
2777         lwkt_reltoken(&map->token);
2778         vm_map_unlock_read(map);
2779
2780         return (KERN_SUCCESS);
2781 }
2782
2783 /*
2784  * Make the region specified by this entry pageable.
2785  *
2786  * The vm_map must be exclusively locked.
2787  */
2788 static void 
2789 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
2790 {
2791         entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2792         entry->wired_count = 0;
2793         vm_fault_unwire(map, entry);
2794 }
2795
2796 /*
2797  * Deallocate the given entry from the target map.
2798  *
2799  * The vm_map must be exclusively locked.
2800  */
2801 static void
2802 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry, int *countp)
2803 {
2804         vm_map_entry_unlink(map, entry);
2805         map->size -= entry->end - entry->start;
2806
2807         switch(entry->maptype) {
2808         case VM_MAPTYPE_NORMAL:
2809         case VM_MAPTYPE_VPAGETABLE:
2810         case VM_MAPTYPE_SUBMAP:
2811                 vm_object_deallocate(entry->object.vm_object);
2812                 break;
2813         case VM_MAPTYPE_UKSMAP:
2814                 /* XXX TODO */
2815                 break;
2816         default:
2817                 break;
2818         }
2819
2820         vm_map_entry_dispose(map, entry, countp);
2821 }
2822
2823 /*
2824  * Deallocates the given address range from the target map.
2825  *
2826  * The vm_map must be exclusively locked.
2827  */
2828 int
2829 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end, int *countp)
2830 {
2831         vm_object_t object;
2832         vm_map_entry_t entry;
2833         vm_map_entry_t first_entry;
2834
2835         ASSERT_VM_MAP_LOCKED(map);
2836         lwkt_gettoken(&map->token);
2837 again:
2838         /*
2839          * Find the start of the region, and clip it.  Set entry to point
2840          * at the first record containing the requested address or, if no
2841          * such record exists, the next record with a greater address.  The
2842          * loop will run from this point until a record beyond the termination
2843          * address is encountered.
2844          *
2845          * map->hint must be adjusted to not point to anything we delete,
2846          * so set it to the entry prior to the one being deleted.
2847          *
2848          * GGG see other GGG comment.
2849          */
2850         if (vm_map_lookup_entry(map, start, &first_entry)) {
2851                 entry = first_entry;
2852                 vm_map_clip_start(map, entry, start, countp);
2853                 map->hint = entry->prev;        /* possible problem XXX */
2854         } else {
2855                 map->hint = first_entry;        /* possible problem XXX */
2856                 entry = first_entry->next;
2857         }
2858
2859         /*
2860          * If a hole opens up prior to the current first_free then
2861          * adjust first_free.  As with map->hint, map->first_free
2862          * cannot be left set to anything we might delete.
2863          */
2864         if (entry == &map->header) {
2865                 map->first_free = &map->header;
2866         } else if (map->first_free->start >= start) {
2867                 map->first_free = entry->prev;
2868         }
2869
2870         /*
2871          * Step through all entries in this region
2872          */
2873         while ((entry != &map->header) && (entry->start < end)) {
2874                 vm_map_entry_t next;
2875                 vm_offset_t s, e;
2876                 vm_pindex_t offidxstart, offidxend, count;
2877
2878                 /*
2879                  * If we hit an in-transition entry we have to sleep and
2880                  * retry.  It's easier (and not really slower) to just retry
2881                  * since this case occurs so rarely and the hint is already
2882                  * pointing at the right place.  We have to reset the
2883                  * start offset so as not to accidentally delete an entry
2884                  * another process just created in vacated space.
2885                  */
2886                 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
2887                         entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2888                         start = entry->start;
2889                         ++mycpu->gd_cnt.v_intrans_coll;
2890                         ++mycpu->gd_cnt.v_intrans_wait;
2891                         vm_map_transition_wait(map);
2892                         goto again;
2893                 }
2894                 vm_map_clip_end(map, entry, end, countp);
2895
2896                 s = entry->start;
2897                 e = entry->end;
2898                 next = entry->next;
2899
2900                 offidxstart = OFF_TO_IDX(entry->offset);
2901                 count = OFF_TO_IDX(e - s);
2902
2903                 switch(entry->maptype) {
2904                 case VM_MAPTYPE_NORMAL:
2905                 case VM_MAPTYPE_VPAGETABLE:
2906                 case VM_MAPTYPE_SUBMAP:
2907                         object = entry->object.vm_object;
2908                         break;
2909                 default:
2910                         object = NULL;
2911                         break;
2912                 }
2913
2914                 /*
2915                  * Unwire before removing addresses from the pmap; otherwise,
2916                  * unwiring will put the entries back in the pmap.
2917                  */
2918                 if (entry->wired_count != 0)
2919                         vm_map_entry_unwire(map, entry);
2920
2921                 offidxend = offidxstart + count;
2922
2923                 if (object == &kernel_object) {
2924                         vm_object_hold(object);
2925                         vm_object_page_remove(object, offidxstart,
2926                                               offidxend, FALSE);
2927                         vm_object_drop(object);
2928                 } else if (object && object->type != OBJT_DEFAULT &&
2929                            object->type != OBJT_SWAP) {
2930                         /*
2931                          * vnode object routines cannot be chain-locked,
2932                          * but since we aren't removing pages from the
2933                          * object here we can use a shared hold.
2934                          */
2935                         vm_object_hold_shared(object);
2936                         pmap_remove(map->pmap, s, e);
2937                         vm_object_drop(object);
2938                 } else if (object) {
2939                         vm_object_hold(object);
2940                         vm_object_chain_acquire(object, 0);
2941                         pmap_remove(map->pmap, s, e);
2942
2943                         if (object != NULL &&
2944                             object->ref_count != 1 &&
2945                             (object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) ==
2946                              OBJ_ONEMAPPING &&
2947                             (object->type == OBJT_DEFAULT ||
2948                              object->type == OBJT_SWAP)) {
2949                                 vm_object_collapse(object, NULL);
2950                                 vm_object_page_remove(object, offidxstart,
2951                                                       offidxend, FALSE);
2952                                 if (object->type == OBJT_SWAP) {
2953                                         swap_pager_freespace(object,
2954                                                              offidxstart,
2955                                                              count);
2956                                 }
2957                                 if (offidxend >= object->size &&
2958                                     offidxstart < object->size) {
2959                                         object->size = offidxstart;
2960                                 }
2961                         }
2962                         vm_object_chain_release(object);
2963                         vm_object_drop(object);
2964                 } else if (entry->maptype == VM_MAPTYPE_UKSMAP) {
2965                         pmap_remove(map->pmap, s, e);
2966                 }
2967
2968                 /*
2969                  * Delete the entry (which may delete the object) only after
2970                  * removing all pmap entries pointing to its pages.
2971                  * (Otherwise, its page frames may be reallocated, and any
2972                  * modify bits will be set in the wrong object!)
2973                  */
2974                 vm_map_entry_delete(map, entry, countp);
2975                 entry = next;
2976         }
2977         lwkt_reltoken(&map->token);
2978         return (KERN_SUCCESS);
2979 }
2980
2981 /*
2982  * Remove the given address range from the target map.
2983  * This is the exported form of vm_map_delete.
2984  *
2985  * No requirements.
2986  */
2987 int
2988 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
2989 {
2990         int result;
2991         int count;
2992
2993         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
2994         vm_map_lock(map);
2995         VM_MAP_RANGE_CHECK(map, start, end);
2996         result = vm_map_delete(map, start, end, &count);
2997         vm_map_unlock(map);
2998         vm_map_entry_release(count);
2999
3000         return (result);
3001 }
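
/*
 * Illustrative sketch (not part of the original source): a caller only
 * needs the map pointer and a page-aligned range; entry reservation and
 * map locking are handled internally by vm_map_remove().  The map and
 * range below are hypothetical.
 *
 *	vm_offset_t va;			(page-aligned start of the mapping)
 *	vm_size_t len;			(multiple of PAGE_SIZE)
 *
 *	if (vm_map_remove(&p->p_vmspace->vm_map, va, va + len) !=
 *	    KERN_SUCCESS)
 *		kprintf("range was not fully removed\n");
 */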
3002
3003 /*
3004  * Assert that the target map allows the specified privilege on the
3005  * entire address region given.  The entire region must be allocated.
3006  *
3007  * The caller must specify whether the vm_map is already locked or not.
3008  */
3009 boolean_t
3010 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
3011                         vm_prot_t protection, boolean_t have_lock)
3012 {
3013         vm_map_entry_t entry;
3014         vm_map_entry_t tmp_entry;
3015         boolean_t result;
3016
3017         if (have_lock == FALSE)
3018                 vm_map_lock_read(map);
3019
3020         if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
3021                 if (have_lock == FALSE)
3022                         vm_map_unlock_read(map);
3023                 return (FALSE);
3024         }
3025         entry = tmp_entry;
3026
3027         result = TRUE;
3028         while (start < end) {
3029                 if (entry == &map->header) {
3030                         result = FALSE;
3031                         break;
3032                 }
3033                 /*
3034                  * No holes allowed!
3035                  */
3036
3037                 if (start < entry->start) {
3038                         result = FALSE;
3039                         break;
3040                 }
3041                 /*
3042                  * Check protection associated with entry.
3043                  */
3044
3045                 if ((entry->protection & protection) != protection) {
3046                         result = FALSE;
3047                         break;
3048                 }
3049                 /* go to next entry */
3050
3051                 start = entry->end;
3052                 entry = entry->next;
3053         }
3054         if (have_lock == FALSE)
3055                 vm_map_unlock_read(map);
3056         return (result);
3057 }
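
/*
 * Illustrative sketch (not part of the original source): verify that an
 * entire user range is readable before operating on it.  "map", "start"
 * and "end" are hypothetical; passing FALSE asks the routine to take the
 * read lock itself.
 *
 *	if (!vm_map_check_protection(map, start, end, VM_PROT_READ, FALSE))
 *		return (EFAULT);
 */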
3058
3059 /*
3060  * If appropriate this function shadows the original object with a new object
3061  * and moves the VM pages from the original object to the new object.
3062  * The original object will also be collapsed, if possible.
3063  *
3064  * We can only do this for normal memory objects with a single mapping, and
3065  * it only makes sense to do it if there are 2 or more refs on the original
3066  * object.  i.e. typically a memory object that has been extended into
3067  * multiple vm_map_entry's with non-overlapping ranges.
3068  *
3069  * This makes it easier to remove unused pages and keeps object inheritance
3070  * from being a negative impact on memory usage.
3071  *
3072  * On return the (possibly new) entry->object.vm_object will have an
3073  * additional ref on it for the caller to dispose of (usually by cloning
3074  * the vm_map_entry).  The additional ref had to be done in this routine
3075  * to avoid racing a collapse.  The object's ONEMAPPING flag will also be
3076  * cleared.
3077  *
3078  * The vm_map must be locked and its token held.
3079  */
3080 static void
3081 vm_map_split(vm_map_entry_t entry)
3082 {
3083         /* OPTIMIZED */
3084         vm_object_t oobject, nobject, bobject;
3085         vm_offset_t s, e;
3086         vm_page_t m;
3087         vm_pindex_t offidxstart, offidxend, idx;
3088         vm_size_t size;
3089         vm_ooffset_t offset;
3090         int useshadowlist;
3091
3092         /*
3093          * Optimize away object locks for vnode objects.  Important exit/exec
3094          * critical path.
3095          *
3096          * OBJ_ONEMAPPING doesn't apply to vnode objects but clear the flag
3097          * anyway.
3098          */
3099         oobject = entry->object.vm_object;
3100         if (oobject->type != OBJT_DEFAULT && oobject->type != OBJT_SWAP) {
3101                 vm_object_reference_quick(oobject);
3102                 vm_object_clear_flag(oobject, OBJ_ONEMAPPING);
3103                 return;
3104         }
3105
3106         /*
3107          * Setup.  Chain lock the original object throughout the entire
3108          * routine to prevent new page faults from occurring.
3109          *
3110          * XXX can madvise WILLNEED interfere with us too?
3111          */
3112         vm_object_hold(oobject);
3113         vm_object_chain_acquire(oobject, 0);
3114
3115         /*
3116          * Original object cannot be split?  Might have also changed state.
3117          */
3118         if (oobject->handle == NULL || (oobject->type != OBJT_DEFAULT &&
3119                                         oobject->type != OBJT_SWAP)) {
3120                 vm_object_chain_release(oobject);
3121                 vm_object_reference_locked(oobject);
3122                 vm_object_clear_flag(oobject, OBJ_ONEMAPPING);
3123                 vm_object_drop(oobject);
3124                 return;
3125         }
3126
3127         /*
3128          * Collapse original object with its backing store as an
3129          * optimization to reduce chain lengths when possible.
3130          *
3131          * If ref_count <= 1 there aren't other non-overlapping vm_map_entry's
3132          * for oobject, so there's no point collapsing it.
3133          *
3134          * Then re-check whether the object can be split.
3135          */
3136         vm_object_collapse(oobject, NULL);
3137
3138         if (oobject->ref_count <= 1 ||
3139             (oobject->type != OBJT_DEFAULT && oobject->type != OBJT_SWAP) ||
3140             (oobject->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) != OBJ_ONEMAPPING) {
3141                 vm_object_chain_release(oobject);
3142                 vm_object_reference_locked(oobject);
3143                 vm_object_clear_flag(oobject, OBJ_ONEMAPPING);
3144                 vm_object_drop(oobject);
3145                 return;
3146         }
3147
3148         /*
3149          * Acquire the chain lock on the backing object.
3150          *
3151          * Give bobject an additional ref count for when it will be shadowed
3152          * by nobject.
3153          */
3154         useshadowlist = 0;
3155         if ((bobject = oobject->backing_object) != NULL) {
3156                 if (bobject->type != OBJT_VNODE) {
3157                         useshadowlist = 1;
3158                         vm_object_hold(bobject);
3159                         vm_object_chain_wait(bobject, 0);
3160                         /* ref for shadowing below */
3161                         vm_object_reference_locked(bobject);
3162                         vm_object_chain_acquire(bobject, 0);
3163                         KKASSERT(oobject->backing_object == bobject);
3164                         KKASSERT((bobject->flags & OBJ_DEAD) == 0);
3165                 } else {
3166                         /*
3167                          * vnodes are not placed on the shadow list but
3168                          * they still get another ref for the backing_object
3169                          * reference.
3170                          */
3171                         vm_object_reference_quick(bobject);
3172                 }
3173         }
3174
3175         /*
3176          * Calculate the object page range and allocate the new object.
3177          */
3178         offset = entry->offset;
3179         s = entry->start;
3180         e = entry->end;
3181
3182         offidxstart = OFF_TO_IDX(offset);
3183         offidxend = offidxstart + OFF_TO_IDX(e - s);
3184         size = offidxend - offidxstart;
3185
3186         switch(oobject->type) {
3187         case OBJT_DEFAULT:
3188                 nobject = default_pager_alloc(NULL, IDX_TO_OFF(size),
3189                                               VM_PROT_ALL, 0);
3190                 break;
3191         case OBJT_SWAP:
3192                 nobject = swap_pager_alloc(NULL, IDX_TO_OFF(size),
3193                                            VM_PROT_ALL, 0);
3194                 break;
3195         default:
3196                 /* not reached */
3197                 nobject = NULL;
3198                 KKASSERT(0);
3199         }
3200
3201         if (nobject == NULL) {
3202                 if (bobject) {
3203                         if (useshadowlist) {
3204                                 vm_object_chain_release(bobject);
3205                                 vm_object_deallocate(bobject);
3206                                 vm_object_drop(bobject);
3207                         } else {
3208                                 vm_object_deallocate(bobject);
3209                         }
3210                 }
3211                 vm_object_chain_release(oobject);
3212                 vm_object_reference_locked(oobject);
3213                 vm_object_clear_flag(oobject, OBJ_ONEMAPPING);
3214                 vm_object_drop(oobject);
3215                 return;
3216         }
3217
3218         /*
3219          * The new object will replace entry->object.vm_object so it needs
3220          * a second reference (the caller expects an additional ref).
3221          */
3222         vm_object_hold(nobject);
3223         vm_object_reference_locked(nobject);
3224         vm_object_chain_acquire(nobject, 0);
3225
3226         /*
3227          * nobject shadows bobject (oobject already shadows bobject).
3228          *
3229          * Adding an object to bobject's shadow list requires refing bobject
3230          * which we did above in the useshadowlist case.
3231          */
3232         if (bobject) {
3233                 nobject->backing_object_offset =
3234                     oobject->backing_object_offset + IDX_TO_OFF(offidxstart);
3235                 nobject->backing_object = bobject;
3236                 if (useshadowlist) {
3237                         bobject->shadow_count++;
3238                         bobject->generation++;
3239                         LIST_INSERT_HEAD(&bobject->shadow_head,
3240                                          nobject, shadow_list);
3241                         vm_object_clear_flag(bobject, OBJ_ONEMAPPING); /*XXX*/
3242                         vm_object_chain_release(bobject);
3243                         vm_object_drop(bobject);
3244                         vm_object_set_flag(nobject, OBJ_ONSHADOW);
3245                 }
3246         }
3247
3248         /*
3249          * Move the VM pages from oobject to nobject
3250          */
3251         for (idx = 0; idx < size; idx++) {
3252                 vm_page_t m;
3253
3254                 m = vm_page_lookup_busy_wait(oobject, offidxstart + idx,
3255                                              TRUE, "vmpg");
3256                 if (m == NULL)
3257                         continue;
3258
3259                 /*
3260                  * We must wait for pending I/O to complete before we can
3261                  * rename the page.
3262                  *
3263                  * We do not have to VM_PROT_NONE the page as mappings should
3264                  * not be changed by this operation.
3265                  *
3266                  * NOTE: The act of renaming a page updates chaingen for both
3267                  *       objects.
3268                  */
3269                 vm_page_rename(m, nobject, idx);
3270                 /* page automatically made dirty by rename and cache handled */
3271                 /* page remains busy */
3272         }
3273
3274         if (oobject->type == OBJT_SWAP) {
3275                 vm_object_pip_add(oobject, 1);
3276                 /*
3277                  * copy oobject pages into nobject and destroy unneeded
3278                  * pages in shadow object.
3279                  */
3280                 swap_pager_copy(oobject, nobject, offidxstart, 0);
3281                 vm_object_pip_wakeup(oobject);
3282         }
3283
3284         /*
3285          * Wakeup the pages we played with.  No spl protection is needed
3286          * for a simple wakeup.
3287          */
3288         for (idx = 0; idx < size; idx++) {
3289                 m = vm_page_lookup(nobject, idx);
3290                 if (m) {
3291                         KKASSERT(m->flags & PG_BUSY);
3292                         vm_page_wakeup(m);
3293                 }
3294         }
3295         entry->object.vm_object = nobject;
3296         entry->offset = 0LL;
3297
3298         /*
3299          * Cleanup
3300          *
3301          * NOTE: There is no need to remove OBJ_ONEMAPPING from oobject, the
3302          *       related pages were moved and are no longer applicable to the
3303          *       original object.
3304          *
3305          * NOTE: Deallocate oobject (due to its entry->object.vm_object being
3306          *       replaced by nobject).
3307          */
3308         vm_object_chain_release(nobject);
3309         vm_object_drop(nobject);
3310         if (bobject && useshadowlist) {
3311                 vm_object_chain_release(bobject);
3312                 vm_object_drop(bobject);
3313         }
3314         vm_object_chain_release(oobject);
3315         /*vm_object_clear_flag(oobject, OBJ_ONEMAPPING);*/
3316         vm_object_deallocate_locked(oobject);
3317         vm_object_drop(oobject);
3318 }
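
/*
 * Rough picture of the common OBJT_DEFAULT/OBJT_SWAP case handled above
 * (descriptive sketch only, not part of the original source):
 *
 *	before:  this entry --> oobject --> bobject
 *	after:   this entry --> nobject --> bobject
 *
 * Other entries keep referencing oobject; the pages backing the range
 * [offidxstart, offidxend) are renamed into nobject and the extra
 * oobject reference held by this entry is dropped.
 */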
3319
3320 /*
3321  * Copies the contents of the source entry to the destination
3322  * entry.  The entries *must* be aligned properly.
3323  *
3324  * The vm_maps must be exclusively locked.
3325  * The vm_map's token must be held.
3326  *
3327  * Because the maps are locked no faults can be in progress during the
3328  * operation.
3329  */
3330 static void
3331 vm_map_copy_entry(vm_map_t src_map, vm_map_t dst_map,
3332                   vm_map_entry_t src_entry, vm_map_entry_t dst_entry)
3333 {
3334         vm_object_t src_object;
3335
3336         if (dst_entry->maptype == VM_MAPTYPE_SUBMAP ||
3337             dst_entry->maptype == VM_MAPTYPE_UKSMAP)
3338                 return;
3339         if (src_entry->maptype == VM_MAPTYPE_SUBMAP ||
3340             src_entry->maptype == VM_MAPTYPE_UKSMAP)
3341                 return;
3342
3343         if (src_entry->wired_count == 0) {
3344                 /*
3345                  * If the source entry is marked needs_copy, it is already
3346                  * write-protected.
3347                  */
3348                 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
3349                         pmap_protect(src_map->pmap,
3350                             src_entry->start,
3351                             src_entry->end,
3352                             src_entry->protection & ~VM_PROT_WRITE);
3353                 }
3354
3355                 /*
3356                  * Make a copy of the object.
3357                  *
3358                  * The object must be locked prior to checking the object type
3359                  * and for the call to vm_object_collapse() and vm_map_split().
3360                  * We cannot use *_hold() here because the split code will
3361                  * probably try to destroy the object.  The lock is a pool
3362                  * token and doesn't care.
3363                  *
3364                  * We must bump src_map->timestamp when setting
3365                  * MAP_ENTRY_NEEDS_COPY to force any concurrent fault
3366                  * to retry, otherwise the concurrent fault might improperly
3367                  * install a RW pte when it's supposed to be a RO(COW) pte.
3368                  * This race can occur because a vnode-backed fault may have
3369                  * to temporarily release the map lock.
3370                  */
3371                 if (src_entry->object.vm_object != NULL) {
3372                         vm_map_split(src_entry);
3373                         src_object = src_entry->object.vm_object;
3374                         dst_entry->object.vm_object = src_object;
3375                         src_entry->eflags |= (MAP_ENTRY_COW |
3376                                               MAP_ENTRY_NEEDS_COPY);
3377                         dst_entry->eflags |= (MAP_ENTRY_COW |
3378                                               MAP_ENTRY_NEEDS_COPY);
3379                         dst_entry->offset = src_entry->offset;
3380                         ++src_map->timestamp;
3381                 } else {
3382                         dst_entry->object.vm_object = NULL;
3383                         dst_entry->offset = 0;
3384                 }
3385
3386                 pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
3387                     dst_entry->end - dst_entry->start, src_entry->start);
3388         } else {
3389                 /*
3390                  * Of course, wired down pages can't be set copy-on-write.
3391                  * Cause wired pages to be copied into the new map by
3392                  * simulating faults (the new pages are pageable)
3393                  */
3394                 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
3395         }
3396 }
3397
3398 /*
3399  * vmspace_fork:
3400  * Create a new process vmspace structure and vm_map
3401  * based on those of an existing process.  The new map
3402  * is based on the old map, according to the inheritance
3403  * values on the regions in that map.
3404  *
3405  * The source map must not be locked.
3406  * No requirements.
3407  */
3408 static void vmspace_fork_normal_entry(vm_map_t old_map, vm_map_t new_map,
3409                           vm_map_entry_t old_entry, int *countp);
3410 static void vmspace_fork_uksmap_entry(vm_map_t old_map, vm_map_t new_map,
3411                           vm_map_entry_t old_entry, int *countp);
3412
3413 struct vmspace *
3414 vmspace_fork(struct vmspace *vm1)
3415 {
3416         struct vmspace *vm2;
3417         vm_map_t old_map = &vm1->vm_map;
3418         vm_map_t new_map;
3419         vm_map_entry_t old_entry;
3420         int count;
3421
3422         lwkt_gettoken(&vm1->vm_map.token);
3423         vm_map_lock(old_map);
3424
3425         vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset);
3426         lwkt_gettoken(&vm2->vm_map.token);
3427         bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
3428             (caddr_t)&vm1->vm_endcopy - (caddr_t)&vm1->vm_startcopy);
3429         new_map = &vm2->vm_map; /* XXX */
3430         new_map->timestamp = 1;
3431
3432         vm_map_lock(new_map);
3433
3434         count = 0;
3435         old_entry = old_map->header.next;
3436         while (old_entry != &old_map->header) {
3437                 ++count;
3438                 old_entry = old_entry->next;
3439         }
3440
3441         count = vm_map_entry_reserve(count + MAP_RESERVE_COUNT);
3442
3443         old_entry = old_map->header.next;
3444         while (old_entry != &old_map->header) {
3445                 switch(old_entry->maptype) {
3446                 case VM_MAPTYPE_SUBMAP:
3447                         panic("vm_map_fork: encountered a submap");
3448                         break;
3449                 case VM_MAPTYPE_UKSMAP:
3450                         vmspace_fork_uksmap_entry(old_map, new_map,
3451                                                   old_entry, &count);
3452                         break;
3453                 case VM_MAPTYPE_NORMAL:
3454                 case VM_MAPTYPE_VPAGETABLE:
3455                         vmspace_fork_normal_entry(old_map, new_map,
3456                                                   old_entry, &count);
3457                         break;
3458                 }
3459                 old_entry = old_entry->next;
3460         }
3461
3462         new_map->size = old_map->size;
3463         vm_map_unlock(old_map);
3464         vm_map_unlock(new_map);
3465         vm_map_entry_release(count);
3466
3467         lwkt_reltoken(&vm2->vm_map.token);
3468         lwkt_reltoken(&vm1->vm_map.token);
3469
3470         return (vm2);
3471 }
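
/*
 * Greatly simplified sketch (not part of the original source) of how a
 * fork path might consume vmspace_fork(); the real fork code also
 * handles reference counts and process linkage.  "p1"/"p2" are
 * hypothetical parent and child processes.
 *
 *	struct vmspace *vm2;
 *
 *	vm2 = vmspace_fork(p1->p_vmspace);
 *	pmap_pinit2(vmspace_pmap(vm2));
 *	p2->p_vmspace = vm2;
 */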
3472
3473 static
3474 void
3475 vmspace_fork_normal_entry(vm_map_t old_map, vm_map_t new_map,
3476                           vm_map_entry_t old_entry, int *countp)
3477 {
3478         vm_map_entry_t new_entry;
3479         vm_object_t object;
3480
3481         switch (old_entry->inheritance) {
3482         case VM_INHERIT_NONE:
3483                 break;
3484         case VM_INHERIT_SHARE:
3485                 /*
3486                  * Clone the entry, creating the shared object if
3487                  * necessary.
3488                  */
3489                 if (old_entry->object.vm_object == NULL)
3490                         vm_map_entry_allocate_object(old_entry);
3491
3492                 if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
3493                         /*
3494                          * Shadow a map_entry which needs a copy,
3495                          * replacing its object with a new object
3496                          * that points to the old one.  Ask the
3497                          * shadow code to automatically add an
3498                          * additional ref.  We can't do it afterwards
3499                          * because we might race a collapse.  The call
3500                          * to vm_map_entry_shadow() will also clear
3501                          * OBJ_ONEMAPPING.
3502                          */
3503                         vm_map_entry_shadow(old_entry, 1);
3504                 } else if (old_entry->object.vm_object) {
3505                         /*
3506                          * We will make a shared copy of the object,
3507                          * and must clear OBJ_ONEMAPPING.
3508                          *
3509                          * Optimize vnode objects.  OBJ_ONEMAPPING
3510                          * is non-applicable but clear it anyway,
3511                          * and it's terminal so we don't have to deal
3512                          * with chains.  Reduces SMP conflicts.
3513                          *
3514                          * XXX assert that object.vm_object != NULL
3515                          *     since we allocate it above.
3516                          */
3517                         object = old_entry->object.vm_object;
3518                         if (object->type == OBJT_VNODE) {
3519                                 vm_object_reference_quick(object);
3520                                 vm_object_clear_flag(object,
3521                                                      OBJ_ONEMAPPING);
3522                         } else {
3523                                 vm_object_hold(object);
3524                                 vm_object_chain_wait(object, 0);
3525                                 vm_object_reference_locked(object);
3526                                 vm_object_clear_flag(object,
3527                                                      OBJ_ONEMAPPING);
3528                                 vm_object_drop(object);
3529                         }
3530                 }
3531
3532                 /*
3533                  * Clone the entry.  We've already bumped the ref on
3534                  * any vm_object.
3535                  */
3536                 new_entry = vm_map_entry_create(new_map, countp);
3537                 *new_entry = *old_entry;
3538                 new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
3539                 new_entry->wired_count = 0;
3540
3541                 /*
3542                  * Insert the entry into the new map -- we know we're
3543                  * inserting at the end of the new map.
3544                  */
3545
3546                 vm_map_entry_link(new_map, new_map->header.prev,
3547                                   new_entry);
3548
3549                 /*
3550                  * Update the physical map
3551                  */
3552                 pmap_copy(new_map->pmap, old_map->pmap,
3553                           new_entry->start,
3554                           (old_entry->end - old_entry->start),
3555                           old_entry->start);
3556                 break;
3557         case VM_INHERIT_COPY:
3558                 /*
3559                  * Clone the entry and link into the map.
3560                  */
3561                 new_entry = vm_map_entry_create(new_map, countp);
3562                 *new_entry = *old_entry;
3563                 new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
3564                 new_entry->wired_count = 0;
3565                 new_entry->object.vm_object = NULL;
3566                 vm_map_entry_link(new_map, new_map->header.prev,
3567                                   new_entry);
3568                 vm_map_copy_entry(old_map, new_map, old_entry,
3569                                   new_entry);
3570                 break;
3571         }
3572 }
3573
3574 /*
3575  * When forking user-kernel shared maps, the map might change in the
3576  * child so do not try to copy the underlying pmap entries.
3577  */
3578 static
3579 void
3580 vmspace_fork_uksmap_entry(vm_map_t old_map, vm_map_t new_map,
3581                           vm_map_entry_t old_entry, int *countp)
3582 {
3583         vm_map_entry_t new_entry;
3584
3585         new_entry = vm_map_entry_create(new_map, countp);
3586         *new_entry = *old_entry;
3587         new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
3588         new_entry->wired_count = 0;
3589         vm_map_entry_link(new_map, new_map->header.prev,
3590                           new_entry);
3591 }
3592
3593 /*
3594  * Create an auto-grow stack entry
3595  *
3596  * No requirements.
3597  */
3598 int
3599 vm_map_stack (vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
3600               int flags, vm_prot_t prot, vm_prot_t max, int cow)
3601 {
3602         vm_map_entry_t  prev_entry;
3603         vm_map_entry_t  new_stack_entry;
3604         vm_size_t       init_ssize;
3605         int             rv;
3606         int             count;
3607         vm_offset_t     tmpaddr;
3608
3609         cow |= MAP_IS_STACK;
3610
3611         if (max_ssize < sgrowsiz)
3612                 init_ssize = max_ssize;
3613         else
3614                 init_ssize = sgrowsiz;
3615
3616         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
3617         vm_map_lock(map);
3618
3619         /*
3620          * Find space for the mapping
3621          */
3622         if ((flags & (MAP_FIXED | MAP_TRYFIXED)) == 0) {
3623                 if (vm_map_findspace(map, addrbos, max_ssize, 1,
3624                                      flags, &tmpaddr)) {
3625                         vm_map_unlock(map);
3626                         vm_map_entry_release(count);
3627                         return (KERN_NO_SPACE);
3628                 }
3629                 addrbos = tmpaddr;
3630         }
3631
3632         /* If addr is already mapped, no go */
3633         if (vm_map_lookup_entry(map, addrbos, &prev_entry)) {
3634                 vm_map_unlock(map);
3635                 vm_map_entry_release(count);
3636                 return (KERN_NO_SPACE);
3637         }
3638
3639 #if 0
3640         /* XXX already handled by kern_mmap() */
3641         /* If we would blow our VMEM resource limit, no go */
3642         if (map->size + init_ssize >
3643             curproc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
3644                 vm_map_unlock(map);
3645                 vm_map_entry_release(count);
3646                 return (KERN_NO_SPACE);
3647         }
3648 #endif
3649
3650         /*
3651          * If we can't accommodate max_ssize in the current mapping,
3652          * no go.  However, we need to be aware that subsequent user
3653          * mappings might map into the space we have reserved for
3654          * stack, and currently this space is not protected.  
3655          * 
3656          * Hopefully we will at least detect this condition 
3657          * when we try to grow the stack.
3658          */
3659         if ((prev_entry->next != &map->header) &&
3660             (prev_entry->next->start < addrbos + max_ssize)) {
3661                 vm_map_unlock(map);
3662                 vm_map_entry_release(count);
3663                 return (KERN_NO_SPACE);
3664         }
3665
3666         /*
3667          * We initially map a stack of only init_ssize.  We will
3668          * grow as needed later.  Since this is to be a grow 
3669          * down stack, we map at the top of the range.
3670          *
3671          * Note: we would normally expect prot and max to be
3672          * VM_PROT_ALL, and cow to be 0.  Possibly we should
3673          * eliminate these as input parameters, and just
3674          * pass these values here in the insert call.
3675          */
3676         rv = vm_map_insert(map, &count, NULL, NULL,
3677                            0, addrbos + max_ssize - init_ssize,
3678                            addrbos + max_ssize,
3679                            VM_MAPTYPE_NORMAL,
3680                            prot, max, cow);
3681
3682         /* Now set the avail_ssize amount */
3683         if (rv == KERN_SUCCESS) {
3684                 if (prev_entry != &map->header)
3685                         vm_map_clip_end(map, prev_entry, addrbos + max_ssize - init_ssize, &count);
3686                 new_stack_entry = prev_entry->next;
3687                 if (new_stack_entry->end   != addrbos + max_ssize ||
3688                     new_stack_entry->start != addrbos + max_ssize - init_ssize)
3689                         panic ("Bad entry start/end for new stack entry");
3690                 else 
3691                         new_stack_entry->aux.avail_ssize = max_ssize - init_ssize;
3692         }
3693
3694         vm_map_unlock(map);
3695         vm_map_entry_release(count);
3696         return (rv);
3697 }
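
/*
 * Illustrative sketch (not part of the original source): creating a
 * grow-down stack of at most "maxsz" bytes whose top ends at "top".
 * Both values are hypothetical; prot/max/cow follow the note above.
 *
 *	rv = vm_map_stack(&vm->vm_map, top - maxsz, maxsz, 0,
 *			  VM_PROT_ALL, VM_PROT_ALL, 0);
 *	if (rv != KERN_SUCCESS)
 *		return (ENOMEM);
 */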
3698
3699 /*
3700  * Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if the
3701  * desired address is already mapped, or if we successfully grow
3702  * the stack.  Also returns KERN_SUCCESS if addr is outside the
3703  * stack range (this is strange, but preserves compatibility with
3704  * the grow function in vm_machdep.c).
3705  *
3706  * No requirements.
3707  */
3708 int
3709 vm_map_growstack (struct proc *p, vm_offset_t addr)
3710 {
3711         vm_map_entry_t prev_entry;
3712         vm_map_entry_t stack_entry;
3713         vm_map_entry_t new_stack_entry;
3714         struct vmspace *vm = p->p_vmspace;
3715         vm_map_t map = &vm->vm_map;
3716         vm_offset_t    end;
3717         int grow_amount;
3718         int rv = KERN_SUCCESS;
3719         int is_procstack;
3720         int use_read_lock = 1;
3721         int count;
3722
3723         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
3724 Retry:
3725         if (use_read_lock)
3726                 vm_map_lock_read(map);
3727         else
3728                 vm_map_lock(map);
3729
3730         /* If addr is already in the entry range, no need to grow.*/
3731         if (vm_map_lookup_entry(map, addr, &prev_entry))
3732                 goto done;
3733
3734         if ((stack_entry = prev_entry->next) == &map->header)
3735                 goto done;
3736         if (prev_entry == &map->header) 
3737                 end = stack_entry->start - stack_entry->aux.avail_ssize;
3738         else
3739                 end = prev_entry->end;
3740
3741         /*
3742          * This next test mimics the old grow function in vm_machdep.c.
3743          * It really doesn't quite make sense, but we do it anyway
3744          * for compatibility.
3745          *
3746          * If the stack is not growable, return success.  This signals the
3747          * caller to proceed as it normally would with normal vm.
3748          */
3749         if (stack_entry->aux.avail_ssize < 1 ||
3750             addr >= stack_entry->start ||
3751             addr <  stack_entry->start - stack_entry->aux.avail_ssize) {
3752                 goto done;
3753         } 
3754         
3755         /* Find the minimum grow amount */
3756         grow_amount = roundup (stack_entry->start - addr, PAGE_SIZE);
3757         if (grow_amount > stack_entry->aux.avail_ssize) {
3758                 rv = KERN_NO_SPACE;
3759                 goto done;
3760         }
3761
3762          * If there is no longer enough space between the entries,
3763          * fail (no go) and adjust the available space.  Note: this
3764          * nogo, and adjust the available space.  Note: this 
3765          * should only happen if the user has mapped into the
3766          * stack area after the stack was created, and is
3767          * probably an error.
3768          *
3769          * This also effectively destroys any guard page the user
3770          * might have intended by limiting the stack size.
3771          */
3772         if (grow_amount > stack_entry->start - end) {
3773                 if (use_read_lock && vm_map_lock_upgrade(map)) {
3774                         /* lost lock */
3775                         use_read_lock = 0;
3776                         goto Retry;
3777                 }
3778                 use_read_lock = 0;
3779                 stack_entry->aux.avail_ssize = stack_entry->start - end;
3780                 rv = KERN_NO_SPACE;
3781                 goto done;
3782         }
3783
3784         is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr;
3785
3786         /* If this is the main process stack, see if we're over the 
3787          * stack limit.
3788          */
3789         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
3790                              p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
3791                 rv = KERN_NO_SPACE;
3792                 goto done;
3793         }
3794
3795         /* Round up the grow amount modulo SGROWSIZ */
3796         grow_amount = roundup (grow_amount, sgrowsiz);
3797         if (grow_amount > stack_entry->aux.avail_ssize) {
3798                 grow_amount = stack_entry->aux.avail_ssize;
3799         }
3800         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
3801                              p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
3802                 grow_amount = p->p_rlimit[RLIMIT_STACK].rlim_cur -
3803                               ctob(vm->vm_ssize);
3804         }
3805
3806         /* If we would blow our VMEM resource limit, no go */
3807         if (map->size + grow_amount > p->p_rlimit[RLIMIT_VMEM].rlim_cur) {
3808                 rv = KERN_NO_SPACE;
3809                 goto done;
3810         }
3811
3812         if (use_read_lock && vm_map_lock_upgrade(map)) {
3813                 /* lost lock */
3814                 use_read_lock = 0;
3815                 goto Retry;
3816         }
3817         use_read_lock = 0;
3818
3819         /* Get the preliminary new entry start value */
3820         addr = stack_entry->start - grow_amount;
3821
3822         /* If this puts us into the previous entry, cut back our growth
3823          * to the available space.  Also, see the note above.
3824          */
3825         if (addr < end) {
3826                 stack_entry->aux.avail_ssize = stack_entry->start - end;
3827                 addr = end;
3828         }
3829
3830         rv = vm_map_insert(map, &count, NULL, NULL,
3831                            0, addr, stack_entry->start,
3832                            VM_MAPTYPE_NORMAL,
3833                            VM_PROT_ALL, VM_PROT_ALL, 0);
3834
3835         /* Adjust the available stack space by the amount we grew. */
3836         if (rv == KERN_SUCCESS) {
3837                 if (prev_entry != &map->header)
3838                         vm_map_clip_end(map, prev_entry, addr, &count);
3839                 new_stack_entry = prev_entry->next;
3840                 if (new_stack_entry->end   != stack_entry->start  ||
3841                     new_stack_entry->start != addr)
3842                         panic ("Bad stack grow start/end in new stack entry");
3843                 else {
3844                         new_stack_entry->aux.avail_ssize =
3845                                 stack_entry->aux.avail_ssize -
3846                                 (new_stack_entry->end - new_stack_entry->start);
3847                         if (is_procstack)
3848                                 vm->vm_ssize += btoc(new_stack_entry->end -
3849                                                      new_stack_entry->start);
3850                 }
3851
3852                 if (map->flags & MAP_WIREFUTURE)
3853                         vm_map_unwire(map, new_stack_entry->start,
3854                                       new_stack_entry->end, FALSE);
3855         }
3856
3857 done:
3858         if (use_read_lock)
3859                 vm_map_unlock_read(map);
3860         else
3861                 vm_map_unlock(map);
3862         vm_map_entry_release(count);
3863         return (rv);
3864 }
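
/*
 * Worked example of the grow-amount arithmetic above (numbers are
 * hypothetical, not from the original source): with PAGE_SIZE = 4KB and
 * sgrowsiz = 128KB, a fault 5000 bytes below stack_entry->start gives
 *
 *	grow_amount = roundup(5000, PAGE_SIZE) = 8192
 *	grow_amount = roundup(8192, sgrowsiz)  = 131072
 *
 * which is then clipped to avail_ssize and to the RLIMIT_STACK limit
 * before the new entry is inserted.
 */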
3865
3866 /*
3867  * Unshare the specified VM space for exec.  If other processes are
3868  * mapped to it, then create a new one.  The new vmspace is null.
3869  *
3870  * No requirements.
3871  */
3872 void
3873 vmspace_exec(struct proc *p, struct vmspace *vmcopy) 
3874 {
3875         struct vmspace *oldvmspace = p->p_vmspace;
3876         struct vmspace *newvmspace;
3877         vm_map_t map = &p->p_vmspace->vm_map;
3878
3879         /*
3880          * If we are execing a resident vmspace we fork it, otherwise
3881          * we create a new vmspace.  Note that exitingcnt is not
3882          * copied to the new vmspace.
3883          */
3884         lwkt_gettoken(&oldvmspace->vm_map.token);
3885         if (vmcopy)  {
3886                 newvmspace = vmspace_fork(vmcopy);
3887                 lwkt_gettoken(&newvmspace->vm_map.token);
3888         } else {
3889                 newvmspace = vmspace_alloc(map->min_offset, map->max_offset);
3890                 lwkt_gettoken(&newvmspace->vm_map.token);
3891                 bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy,
3892                       (caddr_t)&oldvmspace->vm_endcopy -
3893                        (caddr_t)&oldvmspace->vm_startcopy);
3894         }
3895
3896         /*
3897          * Finish initializing the vmspace before assigning it
3898          * to the process.  The vmspace will become the current vmspace
3899          * if p == curproc.
3900          */
3901         pmap_pinit2(vmspace_pmap(newvmspace));
3902         pmap_replacevm(p, newvmspace, 0);
3903         lwkt_reltoken(&newvmspace->vm_map.token);
3904         lwkt_reltoken(&oldvmspace->vm_map.token);
3905         vmspace_rel(oldvmspace);
3906 }
3907
3908 /*
3909  * Unshare the specified VM space for forcing COW.  This
3910  * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
3911  */
3912 void
3913 vmspace_unshare(struct proc *p) 
3914 {
3915         struct vmspace *oldvmspace = p->p_vmspace;
3916         struct vmspace *newvmspace;
3917
3918         lwkt_gettoken(&oldvmspace->vm_map.token);
3919         if (vmspace_getrefs(oldvmspace) == 1) {
3920                 lwkt_reltoken(&oldvmspace->vm_map.token);
3921                 return;
3922         }
3923         newvmspace = vmspace_fork(oldvmspace);
3924         lwkt_gettoken(&newvmspace->vm_map.token);
3925         pmap_pinit2(vmspace_pmap(newvmspace));
3926         pmap_replacevm(p, newvmspace, 0);
3927         lwkt_reltoken(&newvmspace->vm_map.token);
3928         lwkt_reltoken(&oldvmspace->vm_map.token);
3929         vmspace_rel(oldvmspace);
3930 }
3931
3932 /*
3933  * vm_map_hint: return the beginning of the best area suitable for
3934  * creating a new mapping with "prot" protection.
3935  *
3936  * No requirements.
3937  */
3938 vm_offset_t
3939 vm_map_hint(struct proc *p, vm_offset_t addr, vm_prot_t prot)
3940 {
3941         struct vmspace *vms = p->p_vmspace;
3942
3943         if (!randomize_mmap || addr != 0) {
3944                 /*
3945                  * Set a reasonable start point for the hint if it was
3946                  * not specified or if it falls within the heap space.
3947                  * Hinted mmap()s do not allocate out of the heap space.
3948                  */
3949                 if (addr == 0 ||
3950                     (addr >= round_page((vm_offset_t)vms->vm_taddr) &&
3951                      addr < round_page((vm_offset_t)vms->vm_daddr + maxdsiz))) {
3952                         addr = round_page((vm_offset_t)vms->vm_daddr + maxdsiz);
3953                 }
3954
3955                 return addr;
3956         }
3957         addr = (vm_offset_t)vms->vm_daddr + MAXDSIZ;
3958         addr += karc4random() & (MIN((256 * 1024 * 1024), MAXDSIZ) - 1);
3959
3960         return (round_page(addr));
3961 }
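
/*
 * Worked example of the randomized case above (numbers are hypothetical,
 * not from the original source): with MAXDSIZ >= 256MB the mask is
 * MIN(256MB, MAXDSIZ) - 1 = 0x0fffffff, so the hint becomes a
 * page-aligned address in roughly
 * [vm_daddr + MAXDSIZ, vm_daddr + MAXDSIZ + 256MB).
 */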
3962
3963 /*
3964  * Finds the VM object, offset, and protection for a given virtual address
3965  * in the specified map, assuming a page fault of the type specified.
3966  *
3967  * Leaves the map in question locked for read; return values are guaranteed
3968  * until a vm_map_lookup_done call is performed.  Note that the map argument
3969  * is in/out; the returned map must be used in the call to vm_map_lookup_done.
3970  *
3971  * A handle (out_entry) is returned for use in vm_map_lookup_done, to make
3972  * that fast.
3973  *
3974  * If a lookup is requested with "write protection" specified, the map may
3975  * be changed to perform virtual copying operations, although the data
3976  * referenced will remain the same.
3977  *
3978  * No requirements.
3979  */
3980 int
3981 vm_map_lookup(vm_map_t *var_map,                /* IN/OUT */
3982               vm_offset_t vaddr,
3983               vm_prot_t fault_typea,
3984               vm_map_entry_t *out_entry,        /* OUT */
3985               vm_object_t *object,              /* OUT */
3986               vm_pindex_t *pindex,              /* OUT */
3987               vm_prot_t *out_prot,              /* OUT */
3988               boolean_t *wired)                 /* OUT */
3989 {
3990         vm_map_entry_t entry;
3991         vm_map_t map = *var_map;
3992         vm_prot_t prot;
3993         vm_prot_t fault_type = fault_typea;
3994         int use_read_lock = 1;
3995         int rv = KERN_SUCCESS;
3996
3997 RetryLookup:
3998         if (use_read_lock)
3999                 vm_map_lock_read(map);
4000         else
4001                 vm_map_lock(map);
4002
4003         /*
4004          * If the map has an interesting hint, try it before calling full
4005          * blown lookup routine.
4006          */
4007         entry = map->hint;
4008         cpu_ccfence();
4009         *out_entry = entry;
4010         *object = NULL;
4011
4012         if ((entry == &map->header) ||
4013             (vaddr < entry->start) || (vaddr >= entry->end)) {
4014                 vm_map_entry_t tmp_entry;
4015
4016                 /*
4017                  * Entry was either not a valid hint, or the vaddr was not
4018                  * contained in the entry, so do a full lookup.
4019                  */
4020                 if (!vm_map_lookup_entry(map, vaddr, &tmp_entry)) {
4021                         rv = KERN_INVALID_ADDRESS;
4022                         goto done;
4023                 }
4024
4025                 entry = tmp_entry;
4026                 *out_entry = entry;
4027         }
4028         
4029         /*
4030          * Handle submaps.
4031          */
4032         if (entry->maptype == VM_MAPTYPE_SUBMAP) {
4033                 vm_map_t old_map = map;
4034
4035                 *var_map = map = entry->object.sub_map;
4036                 if (use_read_lock)
4037                         vm_map_unlock_read(old_map);
4038                 else
4039                         vm_map_unlock(old_map);
4040                 use_read_lock = 1;
4041                 goto RetryLookup;
4042         }
4043
4044         /*
4045          * Check whether this task is allowed to have this page.
4046          * Note the special case for MAP_ENTRY_COW
4047          * pages with an override.  This is to implement a forced
4048          * COW for debuggers.
4049          */
4050
4051         if (fault_type & VM_PROT_OVERRIDE_WRITE)
4052                 prot = entry->max_protection;
4053         else
4054                 prot = entry->protection;
4055
4056         fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
4057         if ((fault_type & prot) != fault_type) {
4058                 rv = KERN_PROTECTION_FAILURE;
4059                 goto done;
4060         }
4061
4062         if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
4063             (entry->eflags & MAP_ENTRY_COW) &&
4064             (fault_type & VM_PROT_WRITE) &&
4065             (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) {
4066                 rv = KERN_PROTECTION_FAILURE;
4067                 goto done;
4068         }
4069
4070         /*
4071          * If this page is not pageable, we have to get it for all possible
4072          * accesses.
4073          */
4074         *wired = (entry->wired_count != 0);
4075         if (*wired)
4076                 prot = fault_type = entry->protection;
4077
4078         /*
4079          * Virtual page tables may need to update the accessed (A) bit
4080          * in a page table entry.  Upgrade the fault to a write fault for
4081          * that case if the map will support it.  If the map does not support
4082          * it the page table entry simply will not be updated.
4083          */
4084         if (entry->maptype == VM_MAPTYPE_VPAGETABLE) {
4085                 if (prot & VM_PROT_WRITE)
4086                         fault_type |= VM_PROT_WRITE;
4087         }
4088
4089         if (curthread->td_lwp && curthread->td_lwp->lwp_vmspace &&
4090             pmap_emulate_ad_bits(&curthread->td_lwp->lwp_vmspace->vm_pmap)) {
4091                 if ((prot & VM_PROT_WRITE) == 0)
4092                         fault_type |= VM_PROT_WRITE;
4093         }
4094
4095         /*
4096          * Only NORMAL and VPAGETABLE maps are object-based.  UKSMAPs are not.
4097          */
4098         if (entry->maptype != VM_MAPTYPE_NORMAL &&
4099             entry->maptype != VM_MAPTYPE_VPAGETABLE) {
4100                 *object = NULL;
4101                 goto skip;
4102         }
4103
4104         /*
4105          * If the entry was copy-on-write, we either ...
4106          */
4107         if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
4108                 /*
4109                  * If we want to write the page, we may as well handle that
4110                  * now since we've got the map locked.
4111                  *
4112                  * If we don't need to write the page, we just demote the
4113                  * permissions allowed.
4114                  */
4115
4116                 if (fault_type & VM_PROT_WRITE) {
4117                         /*
4118                          * Not allowed if TDF_NOFAULT is set as the shadowing
4119                          * operation can deadlock against the faulting
4120                          * function due to the copy-on-write.
4121                          */
4122                         if (curthread->td_flags & TDF_NOFAULT) {
4123                                 rv = KERN_FAILURE_NOFAULT;
4124                                 goto done;
4125                         }
4126
4127                         /*
4128                          * Make a new object, and place it in the object
4129                          * chain.  Note that no new references have appeared
4130                          * -- one just moved from the map to the new
4131                          * object.
4132                          */
4133
4134                         if (use_read_lock && vm_map_lock_upgrade(map)) {
4135                                 /* lost lock */
4136                                 use_read_lock = 0;
4137                                 goto RetryLookup;
4138                         }
4139                         use_read_lock = 0;
4140
4141                         vm_map_entry_shadow(entry, 0);
4142                 } else {
4143                         /*
4144                          * We're attempting to read a copy-on-write page --
4145                          * don't allow writes.
4146                          */
4147
4148                         prot &= ~VM_PROT_WRITE;
4149                 }
4150         }
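        /*
         * (Illustrative sketch of the shadow step above: before the write
         * fault the entry still references the shared backing object;
         * vm_map_entry_shadow() interposes a new object to receive the
         * private copies:
         *
         *      entry --> backing object                 (NEEDS_COPY set)
         *      entry --> shadow object --> backing object
         *
         * A read through a NEEDS_COPY entry merely loses VM_PROT_WRITE,
         * so a later write still faults and takes the shadow path.)
         */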
4151
4152         /*
4153          * Create an object if necessary.
4154          */
4155         if (entry->object.vm_object == NULL && !map->system_map) {
4156                 if (use_read_lock && vm_map_lock_upgrade(map)) {
4157                         /* lost lock */
4158                         use_read_lock = 0;
4159                         goto RetryLookup;
4160                 }
4161                 use_read_lock = 0;
4162                 vm_map_entry_allocate_object(entry);
4163         }
4164
4165         /*
4166          * Return the object/offset from this entry.  If the entry was
4167          * copy-on-write or empty, it has been fixed up.
4168          */
4169         *object = entry->object.vm_object;
4170
4171 skip:
4172         *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
4173
4174         /*
4175          * Return the computed protection in *out_prot.  On success we
4176          * return with a read lock held on the map.  On failure we
4177          * return with the map unlocked.
4178          */
4179         *out_prot = prot;
4180 done:
4181         if (rv == KERN_SUCCESS) {
4182                 if (use_read_lock == 0)
4183                         vm_map_lock_downgrade(map);
4184         } else if (use_read_lock) {
4185                 vm_map_unlock_read(map);
4186         } else {
4187                 vm_map_unlock(map);
4188         }
4189         return (rv);
4190 }
4191
4192 /*
4193  * Releases locks acquired by a vm_map_lookup()
4194  * (according to the handle returned by that lookup).
4195  *
4196  * No other requirements.
4197  */
4198 void
4199 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry, int count)
4200 {
4201         /*
4202          * Unlock the main-level map
4203          */
4204         vm_map_unlock_read(map);
4205         if (count)
4206                 vm_map_entry_release(count);
4207 }
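/*
 * Illustrative sketch (not compiled) of the lookup/done protocol
 * implemented above.  The prototypes assumed here are the ones declared
 * in <vm/vm_map.h>; vm_fault() is the real consumer of this interface.
 */
#if 0
static int
example_resolve_page(vm_map_t map, vm_offset_t va, vm_prot_t fault_type)
{
        vm_map_entry_t entry;
        vm_object_t object;
        vm_pindex_t pindex;
        vm_prot_t prot;
        boolean_t wired;
        int rv;

        /* On success the map is returned read-locked. */
        rv = vm_map_lookup(&map, va, fault_type,
                           &entry, &object, &pindex, &prot, &wired);
        if (rv != KERN_SUCCESS)
                return (rv);

        /* ... page in from (object, pindex), honoring prot and wired ... */

        /* Release the read lock; no entry reservation was made here. */
        vm_map_lookup_done(map, entry, 0);
        return (KERN_SUCCESS);
}
#endif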
4208
4209 #include "opt_ddb.h"
4210 #ifdef DDB
4211 #include <sys/kernel.h>
4212
4213 #include <ddb/ddb.h>
4214
4215 /*
4216  * Debugging only
4217  */
4218 DB_SHOW_COMMAND(map, vm_map_print)
4219 {
4220         static int nlines;
4221         /* XXX convert args. */
4222         vm_map_t map = (vm_map_t)addr;
4223         boolean_t full = have_addr;
4224
4225         vm_map_entry_t entry;
4226
4227         db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
4228             (void *)map,
4229             (void *)map->pmap, map->nentries, map->timestamp);
4230         nlines++;
4231
4232         if (!full && db_indent)
4233                 return;
4234
4235         db_indent += 2;
4236         for (entry = map->header.next; entry != &map->header;
4237             entry = entry->next) {
4238                 db_iprintf("map entry %p: start=%p, end=%p\n",
4239                     (void *)entry, (void *)entry->start, (void *)entry->end);
4240                 nlines++;
4241                 {
4242                         static const char *inheritance_name[4] =
4243                         {"share", "copy", "none", "donate_copy"};
4244
4245                         db_iprintf(" prot=%x/%x/%s",
4246                             entry->protection,
4247                             entry->max_protection,
4248                             inheritance_name[(int)(unsigned char)entry->inheritance]);
4249                         if (entry->wired_count != 0)
4250                                 db_printf(", wired");
4251                 }
4252                 switch(entry->maptype) {
4253                 case VM_MAPTYPE_SUBMAP:
4254                         /* XXX no %qd in kernel.  Truncate entry->offset. */
4255                         db_printf(", share=%p, offset=0x%lx\n",
4256                             (void *)entry->object.sub_map,
4257                             (long)entry->offset);
4258                         nlines++;
4259                         if ((entry->prev == &map->header) ||
4260                             (entry->prev->object.sub_map !=
4261                                 entry->object.sub_map)) {
4262                                 db_indent += 2;
4263                                 vm_map_print((db_expr_t)(intptr_t)
4264                                              entry->object.sub_map,
4265                                              full, 0, NULL);
4266                                 db_indent -= 2;
4267                         }
4268                         break;
4269                 case VM_MAPTYPE_NORMAL:
4270                 case VM_MAPTYPE_VPAGETABLE:
4271                         /* XXX no %qd in kernel.  Truncate entry->offset. */
4272                         db_printf(", object=%p, offset=0x%lx",
4273                             (void *)entry->object.vm_object,
4274                             (long)entry->offset);
4275                         if (entry->eflags & MAP_ENTRY_COW)
4276                                 db_printf(", copy (%s)",
4277                                     (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
4278                         db_printf("\n");
4279                         nlines++;
4280
4281                         if ((entry->prev == &map->header) ||
4282                             (entry->prev->object.vm_object !=
4283                                 entry->object.vm_object)) {
4284                                 db_indent += 2;
4285                                 vm_object_print((db_expr_t)(intptr_t)
4286                                                 entry->object.vm_object,
4287                                                 full, 0, NULL);
4288                                 nlines += 4;
4289                                 db_indent -= 2;
4290                         }
4291                         break;
4292                 case VM_MAPTYPE_UKSMAP:
4293                         db_printf(", uksmap=%p, offset=0x%lx",
4294                             (void *)entry->object.uksmap,
4295                             (long)entry->offset);
4296                         if (entry->eflags & MAP_ENTRY_COW)
4297                                 db_printf(", copy (%s)",
4298                                     (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
4299                         db_printf("\n");
4300                         nlines++;
4301                         break;
4302                 default:
4303                         break;
4304                 }
4305         }
4306         db_indent -= 2;
4307         if (db_indent == 0)
4308                 nlines = 0;
4309 }
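/*
 * Illustrative ddb(4) usage of the command above:
 *
 *      db> show map <address of a struct vm_map>
 *
 * Supplying an address selects the map to print and enables the full
 * per-object listing via vm_object_print().
 */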
4310
4311 /*
4312  * Debugging only
4313  */
4314 DB_SHOW_COMMAND(procvm, procvm)
4315 {
4316         struct proc *p;
4317
4318         if (have_addr) {
4319                 p = (struct proc *) addr;
4320         } else {
4321                 p = curproc;
4322         }
4323
4324         db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
4325             (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
4326             (void *)vmspace_pmap(p->p_vmspace));
4327
4328         vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL);
4329 }
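/*
 * Illustrative ddb(4) usage: "show procvm" prints the vmspace, map and
 * pmap of curproc, while "show procvm <address of a struct proc>" prints
 * those of the given process.
 */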
4330
4331 #endif /* DDB */