kernel - Fix bottlenecks that develop when many processes are running
[dragonfly.git] / sys / vm / vm_map.c
1 /*
2  * (MPSAFE)
3  *
4  * Copyright (c) 1991, 1993
5  *      The Regents of the University of California.  All rights reserved.
6  *
7  * This code is derived from software contributed to Berkeley by
8  * The Mach Operating System project at Carnegie-Mellon University.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *      from: @(#)vm_map.c      8.3 (Berkeley) 1/12/94
35  *
36  *
37  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
38  * All rights reserved.
39  *
40  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
41  *
42  * Permission to use, copy, modify and distribute this software and
43  * its documentation is hereby granted, provided that both the copyright
44  * notice and this permission notice appear in all copies of the
45  * software, derivative works or modified versions, and any portions
46  * thereof, and that both notices appear in supporting documentation.
47  *
48  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
49  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
50  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
51  *
52  * Carnegie Mellon requests users of this software to return to
53  *
54  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
55  *  School of Computer Science
56  *  Carnegie Mellon University
57  *  Pittsburgh PA 15213-3890
58  *
59  * any improvements or extensions that they make and grant Carnegie the
60  * rights to redistribute these changes.
61  *
62  * $FreeBSD: src/sys/vm/vm_map.c,v 1.187.2.19 2003/05/27 00:47:02 alc Exp $
63  */
64
65 /*
66  *      Virtual memory mapping module.
67  */
68
69 #include <sys/param.h>
70 #include <sys/systm.h>
71 #include <sys/kernel.h>
72 #include <sys/proc.h>
73 #include <sys/serialize.h>
74 #include <sys/lock.h>
75 #include <sys/vmmeter.h>
76 #include <sys/mman.h>
77 #include <sys/vnode.h>
78 #include <sys/resourcevar.h>
79 #include <sys/shm.h>
80 #include <sys/tree.h>
81 #include <sys/malloc.h>
82 #include <sys/objcache.h>
83
84 #include <vm/vm.h>
85 #include <vm/vm_param.h>
86 #include <vm/pmap.h>
87 #include <vm/vm_map.h>
88 #include <vm/vm_page.h>
89 #include <vm/vm_object.h>
90 #include <vm/vm_pager.h>
91 #include <vm/vm_kern.h>
92 #include <vm/vm_extern.h>
93 #include <vm/swap_pager.h>
94 #include <vm/vm_zone.h>
95
96 #include <sys/random.h>
97 #include <sys/sysctl.h>
98 #include <sys/spinlock.h>
99
100 #include <sys/thread2.h>
101 #include <sys/spinlock2.h>
102
103 /*
104  * Virtual memory maps provide for the mapping, protection, and sharing
105  * of virtual memory objects.  In addition, this module provides for an
106  * efficient virtual copy of memory from one map to another.
107  *
108  * Synchronization is required prior to most operations.
109  *
110  * Maps consist of an ordered doubly-linked list of simple entries.
111  * A hint and an RB tree are used to speed up lookups.
112  *
113  * Callers looking to modify maps specify start/end addresses which cause
114  * the related map entry to be clipped if necessary, and then later
115  * recombined if the pieces remain compatible.
116  *
117  * Virtual copy operations are performed by copying VM object references
118  * from one map to another, and then marking both regions as copy-on-write.
119  */
120 static boolean_t vmspace_ctor(void *obj, void *privdata, int ocflags);
121 static void vmspace_dtor(void *obj, void *privdata);
122 static void vmspace_terminate(struct vmspace *vm, int final);
123
124 MALLOC_DEFINE(M_VMSPACE, "vmspace", "vmspace objcache backingstore");
125 static struct objcache *vmspace_cache;
126
127 /*
128  * per-cpu page table cross mappings are initialized in early boot
129  * and might require a considerable number of vm_map_entry structures.
130  */
131 #define MAPENTRYBSP_CACHE       (MAXCPU+1)
132 #define MAPENTRYAP_CACHE        8
133
134 static struct vm_zone mapentzone_store;
135 static vm_zone_t mapentzone;
136
137 static struct vm_map_entry map_entry_init[MAX_MAPENT];
138 static struct vm_map_entry cpu_map_entry_init_bsp[MAPENTRYBSP_CACHE];
139 static struct vm_map_entry cpu_map_entry_init_ap[MAXCPU][MAPENTRYAP_CACHE];
140
141 static int randomize_mmap;
142 SYSCTL_INT(_vm, OID_AUTO, randomize_mmap, CTLFLAG_RW, &randomize_mmap, 0,
143     "Randomize mmap offsets");
144 static int vm_map_relock_enable = 1;
145 SYSCTL_INT(_vm, OID_AUTO, map_relock_enable, CTLFLAG_RW,
146            &vm_map_relock_enable, 0, "Enable prefault relocking");
147
148 static void vmspace_drop_notoken(struct vmspace *vm);
149 static void vm_map_entry_shadow(vm_map_entry_t entry, int addref);
150 static vm_map_entry_t vm_map_entry_create(vm_map_t map, int *);
151 static void vm_map_entry_dispose (vm_map_t map, vm_map_entry_t entry, int *);
152 static void _vm_map_clip_end (vm_map_t, vm_map_entry_t, vm_offset_t, int *);
153 static void _vm_map_clip_start (vm_map_t, vm_map_entry_t, vm_offset_t, int *);
154 static void vm_map_entry_delete (vm_map_t, vm_map_entry_t, int *);
155 static void vm_map_entry_unwire (vm_map_t, vm_map_entry_t);
156 static void vm_map_copy_entry (vm_map_t, vm_map_t, vm_map_entry_t,
157                 vm_map_entry_t);
158 static void vm_map_unclip_range (vm_map_t map, vm_map_entry_t start_entry, vm_offset_t start, vm_offset_t end, int *count, int flags);
159
160 /*
161  * Initialize the vm_map module.  Must be called before any other vm_map
162  * routines.
163  *
164  * Map and entry structures are allocated from the general purpose
165  * memory pool with some exceptions:
166  *
167  *      - The kernel map is allocated statically.
168  *      - Initial kernel map entries are allocated out of a static pool.
169  *      - We must set ZONE_SPECIAL here or the early boot code can get
170  *        stuck if there are >63 cores.
171  *
172  *      These restrictions are necessary since malloc() uses the
173  *      maps and requires map entries.
174  *
175  * Called from the low level boot code only.
176  */
177 void
178 vm_map_startup(void)
179 {
180         mapentzone = &mapentzone_store;
181         zbootinit(mapentzone, "MAP ENTRY", sizeof (struct vm_map_entry),
182                   map_entry_init, MAX_MAPENT);
183         mapentzone_store.zflags |= ZONE_SPECIAL;
184 }
185
186 /*
187  * Called prior to any vmspace allocations.
188  *
189  * Called from the low level boot code only.
190  */
191 void
192 vm_init2(void) 
193 {
194         vmspace_cache = objcache_create_mbacked(M_VMSPACE,
195                                                 sizeof(struct vmspace),
196                                                 0, ncpus * 4,
197                                                 vmspace_ctor, vmspace_dtor,
198                                                 NULL);
199         zinitna(mapentzone, NULL, 0, 0, ZONE_USE_RESERVE | ZONE_SPECIAL);
200         pmap_init2();
201         vm_object_init2();
202 }
203
204 /*
205  * objcache support.  We leave the pmap root cached as long as possible
206  * for performance reasons.
207  */
208 static
209 boolean_t
210 vmspace_ctor(void *obj, void *privdata, int ocflags)
211 {
212         struct vmspace *vm = obj;
213
214         bzero(vm, sizeof(*vm));
215         vm->vm_refcnt = VM_REF_DELETED;
216
217         return 1;
218 }
219
220 static
221 void
222 vmspace_dtor(void *obj, void *privdata)
223 {
224         struct vmspace *vm = obj;
225
226         KKASSERT(vm->vm_refcnt == VM_REF_DELETED);
227         pmap_puninit(vmspace_pmap(vm));
228 }
229
230 /*
231  * Red black tree functions
232  *
233  * The caller must hold the related map lock.
234  */
235 static int rb_vm_map_compare(vm_map_entry_t a, vm_map_entry_t b);
236 RB_GENERATE(vm_map_rb_tree, vm_map_entry, rb_entry, rb_vm_map_compare);
237
238 /* a->start is the address; it is the only field that has to be initialized */
239 static int
240 rb_vm_map_compare(vm_map_entry_t a, vm_map_entry_t b)
241 {
242         if (a->start < b->start)
243                 return(-1);
244         else if (a->start > b->start)
245                 return(1);
246         return(0);
247 }
248
249 /*
250  * Initialize vmspace ref/hold counts (also used for vmspace0).  There is
251  * a holdcnt for every refcnt.
252  */
253 void
254 vmspace_initrefs(struct vmspace *vm)
255 {
256         vm->vm_refcnt = 1;
257         vm->vm_holdcnt = 1;
258 }
259
260 /*
261  * Allocate a vmspace structure, including a vm_map and pmap.
262  * Initialize numerous fields.  While the initial allocation is zeroed,
263  * subsequent reuse from the objcache leaves elements of the structure
264  * intact (particularly the pmap), so portions must be zeroed.
265  *
266  * Returns a referenced vmspace.
267  *
268  * No requirements.
269  */
270 struct vmspace *
271 vmspace_alloc(vm_offset_t min, vm_offset_t max)
272 {
273         struct vmspace *vm;
274
275         vm = objcache_get(vmspace_cache, M_WAITOK);
276
277         bzero(&vm->vm_startcopy,
278               (char *)&vm->vm_endcopy - (char *)&vm->vm_startcopy);
279         vm_map_init(&vm->vm_map, min, max, NULL);       /* initializes token */
280
281         /*
282          * NOTE: the hold acquires the token for safety.
283          *
284          * On return vmspace is referenced (refs=1, hold=1).  That is,
285          * each refcnt also has a holdcnt.  There can be additional holds
286          * (holdcnt) above and beyond the refcnt.  Finalization is handled in
287          * two stages, one on refs 1->0, and the second on hold 1->0.
288          */
289         KKASSERT(vm->vm_holdcnt == 0);
290         KKASSERT(vm->vm_refcnt == VM_REF_DELETED);
291         vmspace_initrefs(vm);
292         vmspace_hold(vm);
293         pmap_pinit(vmspace_pmap(vm));           /* (some fields reused) */
294         vm->vm_map.pmap = vmspace_pmap(vm);     /* XXX */
295         vm->vm_shm = NULL;
296         vm->vm_flags = 0;
297         cpu_vmspace_alloc(vm);
298         vmspace_drop(vm);
299
300         return (vm);
301 }
302
303 /*
304  * NOTE: Can return 0 if the vmspace is exiting.
305  */
306 int
307 vmspace_getrefs(struct vmspace *vm)
308 {
309         int32_t n;
310
311         n = vm->vm_refcnt;
312         cpu_ccfence();
313         if (n & VM_REF_DELETED)
314                 n = -1;
315         return n;
316 }
317
318 void
319 vmspace_hold(struct vmspace *vm)
320 {
321         atomic_add_int(&vm->vm_holdcnt, 1);
322         lwkt_gettoken(&vm->vm_map.token);
323 }
324
325 /*
326  * Drop with final termination interlock.
327  */
328 void
329 vmspace_drop(struct vmspace *vm)
330 {
331         lwkt_reltoken(&vm->vm_map.token);
332         vmspace_drop_notoken(vm);
333 }
334
335 static void
336 vmspace_drop_notoken(struct vmspace *vm)
337 {
338         if (atomic_fetchadd_int(&vm->vm_holdcnt, -1) == 1) {
339                 if (vm->vm_refcnt & VM_REF_DELETED)
340                         vmspace_terminate(vm, 1);
341         }
342 }
343
344 /*
345  * A vmspace object must not be in a terminated state to be able to obtain
346  * additional refs on it.
347  *
348  * These are official references to the vmspace, the count is used to check
349  * for vmspace sharing.  Foreign accessors should use 'hold' and not 'ref'.
350  *
351  * XXX we need to combine hold & ref together into one 64-bit field to allow
352  * holds to prevent stage-1 termination.
353  */
354 void
355 vmspace_ref(struct vmspace *vm)
356 {
357         uint32_t n;
358
359         atomic_add_int(&vm->vm_holdcnt, 1);
360         n = atomic_fetchadd_int(&vm->vm_refcnt, 1);
361         KKASSERT((n & VM_REF_DELETED) == 0);
362 }
363
364 /*
365  * Release a ref on the vmspace.  On the 1->0 transition we do stage-1
366  * termination of the vmspace.  Then, on the final drop of the hold we
367  * will do stage-2 final termination.
368  */
369 void
370 vmspace_rel(struct vmspace *vm)
371 {
372         uint32_t n;
373
374         /*
375          * Drop refs.  Each ref also has a hold which is also dropped.
376          *
377          * When refs hits 0, compete to set the VM_REF_DELETED flag (the
378          * hold prevents finalization) and start termination processing.
379          * Finalization occurs when the last hold count drops to 0.
380          */
381         n = atomic_fetchadd_int(&vm->vm_refcnt, -1) - 1;
382         while (n == 0) {
383                 if (atomic_cmpset_int(&vm->vm_refcnt, 0, VM_REF_DELETED)) {
384                         vmspace_terminate(vm, 0);
385                         break;
386                 }
387                 n = vm->vm_refcnt;
388                 cpu_ccfence();
389         }
390         vmspace_drop_notoken(vm);
391 }
392
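/*
 * Example (illustrative sketch): a foreign accessor that only needs
 * temporary, stable access to a vmspace uses the hold/drop pair, which
 * also acquires the map token:
 *
 *	vmspace_hold(vm);
 *	... inspect or iterate vm->vm_map ...
 *	vmspace_drop(vm);
 *
 * Code that shares the vmspace itself takes an official reference with
 * vmspace_ref() and releases it with vmspace_rel(); each ref carries an
 * implicit hold, so stage-2 termination cannot run while refs remain.
 */
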
393 /*
394  * This is called during exit indicating that the vmspace is no
395  * longer in use by an exiting process, but the process has not yet
396  * been reaped.
397  *
398  * We drop refs, allowing for stage-1 termination, but maintain a holdcnt
399  * to prevent stage-2 until the process is reaped.  Note the order of
400  * operations: we must hold first.
401  *
402  * No requirements.
403  */
404 void
405 vmspace_relexit(struct vmspace *vm)
406 {
407         atomic_add_int(&vm->vm_holdcnt, 1);
408         vmspace_rel(vm);
409 }
410
411 /*
412  * Called during reap to disconnect the remainder of the vmspace from
413  * the process.  On the hold drop the vmspace termination is finalized.
414  *
415  * No requirements.
416  */
417 void
418 vmspace_exitfree(struct proc *p)
419 {
420         struct vmspace *vm;
421
422         vm = p->p_vmspace;
423         p->p_vmspace = NULL;
424         vmspace_drop_notoken(vm);
425 }
426
427 /*
428  * Called in two cases:
429  *
430  * (1) When the last refcnt is dropped and the vmspace becomes inactive,
431  *     called with final == 0.  refcnt will be (u_int)-1 at this point,
432  *     and holdcnt will still be non-zero.
433  *
434  * (2) When holdcnt becomes 0, called with final == 1.  There should no
435  *     longer be anyone with access to the vmspace.
436  *
437  * VMSPACE_EXIT1 flags the primary deactivation
438  * VMSPACE_EXIT2 flags the last reap
439  */
440 static void
441 vmspace_terminate(struct vmspace *vm, int final)
442 {
443         int count;
444
445         lwkt_gettoken(&vm->vm_map.token);
446         if (final == 0) {
447                 KKASSERT((vm->vm_flags & VMSPACE_EXIT1) == 0);
448                 vm->vm_flags |= VMSPACE_EXIT1;
449
450                 /*
451                  * Get rid of most of the resources.  Leave the kernel pmap
452                  * intact.
453                  *
454                  * If the pmap does not contain wired pages we can bulk-delete
455                  * the pmap as a performance optimization before removing the
456                  * related mappings.
457                  *
458                  * If the pmap contains wired pages we cannot do this
459                  * pre-optimization because currently vm_fault_unwire()
460                  * expects the pmap pages to exist and will not decrement
461                  * p->wire_count if they do not.
462                  */
463                 shmexit(vm);
464                 if (vmspace_pmap(vm)->pm_stats.wired_count) {
465                         vm_map_remove(&vm->vm_map, VM_MIN_USER_ADDRESS,
466                                       VM_MAX_USER_ADDRESS);
467                         pmap_remove_pages(vmspace_pmap(vm), VM_MIN_USER_ADDRESS,
468                                           VM_MAX_USER_ADDRESS);
469                 } else {
470                         pmap_remove_pages(vmspace_pmap(vm), VM_MIN_USER_ADDRESS,
471                                           VM_MAX_USER_ADDRESS);
472                         vm_map_remove(&vm->vm_map, VM_MIN_USER_ADDRESS,
473                                       VM_MAX_USER_ADDRESS);
474                 }
475                 lwkt_reltoken(&vm->vm_map.token);
476         } else {
477                 KKASSERT((vm->vm_flags & VMSPACE_EXIT1) != 0);
478                 KKASSERT((vm->vm_flags & VMSPACE_EXIT2) == 0);
479
480                 /*
481                  * Get rid of remaining basic resources.
482                  */
483                 vm->vm_flags |= VMSPACE_EXIT2;
484                 shmexit(vm);
485
486                 count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
487                 vm_map_lock(&vm->vm_map);
488                 cpu_vmspace_free(vm);
489
490                 /*
491                  * Lock the map, to wait out all other references to it.
492                  * Delete all of the mappings and pages they hold, then call
493                  * the pmap module to reclaim anything left.
494                  */
495                 vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
496                               vm->vm_map.max_offset, &count);
497                 vm_map_unlock(&vm->vm_map);
498                 vm_map_entry_release(count);
499
500                 pmap_release(vmspace_pmap(vm));
501                 lwkt_reltoken(&vm->vm_map.token);
502                 objcache_put(vmspace_cache, vm);
503         }
504 }
505
506 /*
507  * Swap usage is determined by taking the proportional swap used by
508  * VM objects backing the VM map.  To make up for fractional losses,
509  * if the VM object has any swap use at all the associated map entries
510  * count for at least 1 swap page.
511  *
512  * No requirements.
513  */
514 vm_offset_t
515 vmspace_swap_count(struct vmspace *vm)
516 {
517         vm_map_t map = &vm->vm_map;
518         vm_map_entry_t cur;
519         vm_object_t object;
520         vm_offset_t count = 0;
521         vm_offset_t n;
522
523         vmspace_hold(vm);
524         for (cur = map->header.next; cur != &map->header; cur = cur->next) {
525                 switch(cur->maptype) {
526                 case VM_MAPTYPE_NORMAL:
527                 case VM_MAPTYPE_VPAGETABLE:
528                         if ((object = cur->object.vm_object) == NULL)
529                                 break;
530                         if (object->swblock_count) {
531                                 n = (cur->end - cur->start) / PAGE_SIZE;
532                                 count += object->swblock_count *
533                                     SWAP_META_PAGES * n / object->size + 1;
534                         }
535                         break;
536                 default:
537                         break;
538                 }
539         }
540         vmspace_drop(vm);
541
542         return(count);
543 }
544
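/*
 * Worked example of the charge computed above (illustrative, with made-up
 * numbers): for an entry mapping n = 256 pages of an object with
 * size = 1024 pages and swblock_count = 8, and assuming SWAP_META_PAGES
 * is 16 for the sake of the arithmetic, the charge is
 * 8 * 16 * 256 / 1024 + 1 = 33 pages, i.e. roughly the entry's
 * proportional share of the object's swap plus the minimum of 1.
 */
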
545 /*
546  * Calculate the approximate number of anonymous pages in use by
547  * this vmspace.  To make up for fractional losses, we count each
548  * VM object as having at least 1 anonymous page.
549  *
550  * No requirements.
551  */
552 vm_offset_t
553 vmspace_anonymous_count(struct vmspace *vm)
554 {
555         vm_map_t map = &vm->vm_map;
556         vm_map_entry_t cur;
557         vm_object_t object;
558         vm_offset_t count = 0;
559
560         vmspace_hold(vm);
561         for (cur = map->header.next; cur != &map->header; cur = cur->next) {
562                 switch(cur->maptype) {
563                 case VM_MAPTYPE_NORMAL:
564                 case VM_MAPTYPE_VPAGETABLE:
565                         if ((object = cur->object.vm_object) == NULL)
566                                 break;
567                         if (object->type != OBJT_DEFAULT &&
568                             object->type != OBJT_SWAP) {
569                                 break;
570                         }
571                         count += object->resident_page_count;
572                         break;
573                 default:
574                         break;
575                 }
576         }
577         vmspace_drop(vm);
578
579         return(count);
580 }
581
582 /*
583  * Initialize an existing vm_map structure such as that in the vmspace
584  * structure.  The pmap is initialized elsewhere.
585  *
586  * No requirements.
587  */
588 void
589 vm_map_init(struct vm_map *map, vm_offset_t min, vm_offset_t max, pmap_t pmap)
590 {
591         map->header.next = map->header.prev = &map->header;
592         RB_INIT(&map->rb_root);
593         spin_init(&map->ilock_spin, "ilock");
594         map->ilock_base = NULL;
595         map->nentries = 0;
596         map->size = 0;
597         map->system_map = 0;
598         map->min_offset = min;
599         map->max_offset = max;
600         map->pmap = pmap;
601         map->timestamp = 0;
602         map->flags = 0;
603         bzero(&map->freehint, sizeof(map->freehint));
604         lwkt_token_init(&map->token, "vm_map");
605         lockinit(&map->lock, "vm_maplk", (hz + 9) / 10, 0);
606 }
607
608 /*
609  * Find the first possible free address for the specified request length.
610  * Returns 0 if we don't have one cached.
611  */
612 static
613 vm_offset_t
614 vm_map_freehint_find(vm_map_t map, vm_size_t length, vm_size_t align)
615 {
616         vm_map_freehint_t *scan;
617
618         scan = &map->freehint[0];
619         while (scan < &map->freehint[VM_MAP_FFCOUNT]) {
620                 if (scan->length == length && scan->align == align)
621                         return(scan->start);
622                 ++scan;
623         }
624         return 0;
625 }
626
627 /*
628  * Unconditionally set the freehint.  Called by vm_map_findspace() after
629  * it finds an address.  This will help us iterate optimally on the next
630  * similar findspace.
631  */
632 static
633 void
634 vm_map_freehint_update(vm_map_t map, vm_offset_t start,
635                        vm_size_t length, vm_size_t align)
636 {
637         vm_map_freehint_t *scan;
638
639         scan = &map->freehint[0];
640         while (scan < &map->freehint[VM_MAP_FFCOUNT]) {
641                 if (scan->length == length && scan->align == align) {
642                         scan->start = start;
643                         return;
644                 }
645                 ++scan;
646         }
647         scan = &map->freehint[map->freehint_newindex & VM_MAP_FFMASK];
648         scan->start = start;
649         scan->align = align;
650         scan->length = length;
651         ++map->freehint_newindex;
652 }
653
654 /*
655  * Update any existing freehints (for any alignment), for the hole we just
656  * added.
657  */
658 static
659 void
660 vm_map_freehint_hole(vm_map_t map, vm_offset_t start, vm_size_t length)
661 {
662         vm_map_freehint_t *scan;
663
664         scan = &map->freehint[0];
665         while (scan < &map->freehint[VM_MAP_FFCOUNT]) {
666                 if (scan->length <= length && scan->start > start)
667                         scan->start = start;
668                 ++scan;
669         }
670 }
671
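/*
 * Rough sketch of how the freehint cache is used (illustrative):
 * vm_map_findspace() consults the cache first, records what it found,
 * and code that opens a hole pulls matching hints back down so the hole
 * is not skipped on later searches:
 *
 *	start = vm_map_freehint_find(map, length, align);   (0 if no hint)
 *	... scan for a fit at or beyond start ...
 *	vm_map_freehint_update(map, found_start, length, align);
 *	... later, when [hole_start, hole_start + hole_len) is freed ...
 *	vm_map_freehint_hole(map, hole_start, hole_len);
 */
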
672 /*
673  * Shadow the vm_map_entry's object.  This typically needs to be done when
674  * a write fault is taken on an entry which had previously been cloned by
675  * fork().  The shared object (which might be NULL) must become private so
676  * we add a shadow layer above it.
677  *
678  * Object allocation for anonymous mappings is deferred as long as possible.
679  * When creating a shadow, however, the underlying object must be instantiated
680  * so it can be shared.
681  *
682  * If the map segment is governed by a virtual page table then it is
683  * possible to address offsets beyond the mapped area.  Just allocate
684  * a maximally sized object for this case.
685  *
686  * If addref is non-zero an additional reference is added to the returned
687  * entry.  This mechanic exists because the additional reference might have
688  * to be added atomically and not after return to prevent a premature
689  * collapse.
690  *
691  * The vm_map must be exclusively locked.
692  * No other requirements.
693  */
694 static
695 void
696 vm_map_entry_shadow(vm_map_entry_t entry, int addref)
697 {
698         if (entry->maptype == VM_MAPTYPE_VPAGETABLE) {
699                 vm_object_shadow(&entry->object.vm_object, &entry->offset,
700                                  0x7FFFFFFF, addref);   /* XXX */
701         } else {
702                 vm_object_shadow(&entry->object.vm_object, &entry->offset,
703                                  atop(entry->end - entry->start), addref);
704         }
705         entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
706 }
707
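/*
 * Sketch of the typical trigger (illustrative): a write fault on an
 * entry cloned by fork() resolves the copy-on-write state roughly like
 *
 *	if (entry->eflags & MAP_ENTRY_NEEDS_COPY)
 *		vm_map_entry_shadow(entry, 0);
 *
 * after which the entry has a private top-level object and the
 * MAP_ENTRY_NEEDS_COPY flag has been cleared.
 */
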
708 /*
709  * Allocate an object for a vm_map_entry.
710  *
711  * Object allocation for anonymous mappings is deferred as long as possible.
712  * This function is called when we can defer no longer, generally when a map
713  * entry might be split or forked or takes a page fault.
714  *
715  * If the map segment is governed by a virtual page table then it is
716  * possible to address offsets beyond the mapped area.  Just allocate
717  * a maximally sized object for this case.
718  *
719  * The vm_map must be exclusively locked.
720  * No other requirements.
721  */
722 void 
723 vm_map_entry_allocate_object(vm_map_entry_t entry)
724 {
725         vm_object_t obj;
726
727         if (entry->maptype == VM_MAPTYPE_VPAGETABLE) {
728                 obj = vm_object_allocate(OBJT_DEFAULT, 0x7FFFFFFF); /* XXX */
729         } else {
730                 obj = vm_object_allocate(OBJT_DEFAULT,
731                                          atop(entry->end - entry->start));
732         }
733         entry->object.vm_object = obj;
734         entry->offset = 0;
735 }
736
737 /*
738  * Set an initial negative count so the first attempt to reserve
739  * space preloads a bunch of vm_map_entry's for this cpu.  Also
740  * pre-allocate 2 vm_map_entries which will be needed by zalloc() to
741  * map a new page for vm_map_entry structures.  SMP systems are
742  * particularly sensitive.
743  *
744  * This routine is called in early boot so we cannot just call
745  * vm_map_entry_reserve().
746  *
747  * Called from the low level boot code only (for each cpu)
748  *
749  * WARNING! Take care not to have too-big a static/BSS structure here
750  *          as MAXCPU can be 256+, otherwise the loader's 64MB heap
751  *          can get blown out by the kernel plus the initrd image.
752  */
753 void
754 vm_map_entry_reserve_cpu_init(globaldata_t gd)
755 {
756         vm_map_entry_t entry;
757         int count;
758         int i;
759
760         gd->gd_vme_avail -= MAP_RESERVE_COUNT * 2;
761         if (gd->gd_cpuid == 0) {
762                 entry = &cpu_map_entry_init_bsp[0];
763                 count = MAPENTRYBSP_CACHE;
764         } else {
765                 entry = &cpu_map_entry_init_ap[gd->gd_cpuid][0];
766                 count = MAPENTRYAP_CACHE;
767         }
768         for (i = 0; i < count; ++i, ++entry) {
769                 entry->next = gd->gd_vme_base;
770                 gd->gd_vme_base = entry;
771         }
772 }
773
774 /*
775  * Reserves vm_map_entry structures so code later on can manipulate
776  * map_entry structures within a locked map without blocking trying
777  * to allocate a new vm_map_entry.
778  *
779  * No requirements.
780  */
781 int
782 vm_map_entry_reserve(int count)
783 {
784         struct globaldata *gd = mycpu;
785         vm_map_entry_t entry;
786
787         /*
788          * Make sure we have enough structures in gd_vme_base to handle
789          * the reservation request.
790          *
791          * The critical section protects access to the per-cpu gd.
792          */
793         crit_enter();
794         while (gd->gd_vme_avail < count) {
795                 entry = zalloc(mapentzone);
796                 entry->next = gd->gd_vme_base;
797                 gd->gd_vme_base = entry;
798                 ++gd->gd_vme_avail;
799         }
800         gd->gd_vme_avail -= count;
801         crit_exit();
802
803         return(count);
804 }
805
806 /*
807  * Releases previously reserved vm_map_entry structures that were not
808  * used.  If we have too much junk in our per-cpu cache clean some of
809  * it out.
810  *
811  * No requirements.
812  */
813 void
814 vm_map_entry_release(int count)
815 {
816         struct globaldata *gd = mycpu;
817         vm_map_entry_t entry;
818
819         crit_enter();
820         gd->gd_vme_avail += count;
821         while (gd->gd_vme_avail > MAP_RESERVE_SLOP) {
822                 entry = gd->gd_vme_base;
823                 KKASSERT(entry != NULL);
824                 gd->gd_vme_base = entry->next;
825                 --gd->gd_vme_avail;
826                 crit_exit();
827                 zfree(mapentzone, entry);
828                 crit_enter();
829         }
830         crit_exit();
831 }
832
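/*
 * Example (illustrative sketch): the usual pattern for code that will
 * create, clip, or delete entries while holding the map lock is to
 * reserve first so no allocation can block inside the locked section:
 *
 *	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
 *	vm_map_lock(map);
 *	... manipulate entries, passing &count down ...
 *	vm_map_unlock(map);
 *	vm_map_entry_release(count);
 */
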
833 /*
834  * Reserve map entry structures for use in kernel_map itself.  These
835  * entries have *ALREADY* been reserved on a per-cpu basis when the map
836  * was inited.  This function is used by zalloc() to avoid a recursion
837  * when zalloc() itself needs to allocate additional kernel memory.
838  *
839  * This function works like the normal reserve but does not load the
840  * vm_map_entry cache (because that would result in an infinite
841  * recursion).  Note that gd_vme_avail may go negative.  This is expected.
842  *
843  * Any caller of this function must be sure to renormalize after
844  * potentially eating entries to ensure that the reserve supply
845  * remains intact.
846  *
847  * No requirements.
848  */
849 int
850 vm_map_entry_kreserve(int count)
851 {
852         struct globaldata *gd = mycpu;
853
854         crit_enter();
855         gd->gd_vme_avail -= count;
856         crit_exit();
857         KASSERT(gd->gd_vme_base != NULL,
858                 ("no reserved entries left, gd_vme_avail = %d",
859                 gd->gd_vme_avail));
860         return(count);
861 }
862
863 /*
864  * Release previously reserved map entries for kernel_map.  We do not
865  * attempt to clean up like the normal release function as this would
866  * cause an unnecessary (but probably not fatal) deep procedure call.
867  *
868  * No requirements.
869  */
870 void
871 vm_map_entry_krelease(int count)
872 {
873         struct globaldata *gd = mycpu;
874
875         crit_enter();
876         gd->gd_vme_avail += count;
877         crit_exit();
878 }
879
880 /*
881  * Allocates a VM map entry for insertion.  No entry fields are filled in.
882  *
883  * The entries should have previously been reserved.  The reservation count
884  * is tracked in (*countp).
885  *
886  * No requirements.
887  */
888 static vm_map_entry_t
889 vm_map_entry_create(vm_map_t map, int *countp)
890 {
891         struct globaldata *gd = mycpu;
892         vm_map_entry_t entry;
893
894         KKASSERT(*countp > 0);
895         --*countp;
896         crit_enter();
897         entry = gd->gd_vme_base;
898         KASSERT(entry != NULL, ("gd_vme_base NULL! count %d", *countp));
899         gd->gd_vme_base = entry->next;
900         crit_exit();
901
902         return(entry);
903 }
904
905 /*
906  * Dispose of a vm_map_entry that is no longer being referenced.
907  *
908  * No requirements.
909  */
910 static void
911 vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry, int *countp)
912 {
913         struct globaldata *gd = mycpu;
914
915         ++*countp;
916         crit_enter();
917         entry->next = gd->gd_vme_base;
918         gd->gd_vme_base = entry;
919         crit_exit();
920 }
921
922
923 /*
924  * Insert/remove entries from maps.
925  *
926  * The related map must be exclusively locked.
927  * The caller must hold map->token
928  * No other requirements.
929  */
930 static __inline void
931 vm_map_entry_link(vm_map_t map,
932                   vm_map_entry_t after_where,
933                   vm_map_entry_t entry)
934 {
935         ASSERT_VM_MAP_LOCKED(map);
936
937         map->nentries++;
938         entry->prev = after_where;
939         entry->next = after_where->next;
940         entry->next->prev = entry;
941         after_where->next = entry;
942         if (vm_map_rb_tree_RB_INSERT(&map->rb_root, entry))
943                 panic("vm_map_entry_link: dup addr map %p ent %p", map, entry);
944 }
945
946 static __inline void
947 vm_map_entry_unlink(vm_map_t map,
948                     vm_map_entry_t entry)
949 {
950         vm_map_entry_t prev;
951         vm_map_entry_t next;
952
953         ASSERT_VM_MAP_LOCKED(map);
954
955         if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
956                 panic("vm_map_entry_unlink: attempt to mess with "
957                       "locked entry! %p", entry);
958         }
959         prev = entry->prev;
960         next = entry->next;
961         next->prev = prev;
962         prev->next = next;
963         vm_map_rb_tree_RB_REMOVE(&map->rb_root, entry);
964         map->nentries--;
965 }
966
967 /*
968  * Finds the map entry containing (or immediately preceding) the specified
969  * address in the given map.  The entry is returned in (*entry).
970  *
971  * The boolean result indicates whether the address is actually contained
972  * in the map.
973  *
974  * The related map must be locked.
975  * No other requirements.
976  */
977 boolean_t
978 vm_map_lookup_entry(vm_map_t map, vm_offset_t address, vm_map_entry_t *entry)
979 {
980         vm_map_entry_t tmp;
981         vm_map_entry_t last;
982
983         ASSERT_VM_MAP_LOCKED(map);
984
985         /*
986          * Locate the record from the top of the tree.  'last' tracks the
987          * closest prior record and is returned if no match is found, which
988          * in binary tree terms means tracking the most recent right-branch
989          * taken.  If there is no prior record, &map->header is returned.
990          */
991         last = &map->header;
992         tmp = RB_ROOT(&map->rb_root);
993
994         while (tmp) {
995                 if (address >= tmp->start) {
996                         if (address < tmp->end) {
997                                 *entry = tmp;
998                                 return(TRUE);
999                         }
1000                         last = tmp;
1001                         tmp = RB_RIGHT(tmp, rb_entry);
1002                 } else {
1003                         tmp = RB_LEFT(tmp, rb_entry);
1004                 }
1005         }
1006         *entry = last;
1007         return (FALSE);
1008 }
1009
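/*
 * Example (illustrative sketch) of interpreting the result: on TRUE the
 * returned entry contains the address, on FALSE it is the closest prior
 * entry (possibly &map->header):
 *
 *	if (vm_map_lookup_entry(map, addr, &entry)) {
 *		... addr lies inside [entry->start, entry->end) ...
 *	} else if (entry != &map->header) {
 *		... addr falls in the gap after entry->end ...
 *	} else {
 *		... addr precedes the first entry in the map ...
 *	}
 */
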
1010 /*
1011  * Inserts the given whole VM object into the target map at the specified
1012  * address range.  The object's size should match that of the address range.
1013  *
1014  * The map must be exclusively locked.
1015  * The object must be held.
1016  * The caller must have reserved sufficient vm_map_entry structures.
1017  *
1018  * If object is non-NULL, ref count must be bumped by caller prior to
1019  * making call to account for the new entry.
1020  */
1021 int
1022 vm_map_insert(vm_map_t map, int *countp, void *map_object, void *map_aux,
1023               vm_ooffset_t offset, vm_offset_t start, vm_offset_t end,
1024               vm_maptype_t maptype, vm_subsys_t id,
1025               vm_prot_t prot, vm_prot_t max, int cow)
1026 {
1027         vm_map_entry_t new_entry;
1028         vm_map_entry_t prev_entry;
1029         vm_map_entry_t temp_entry;
1030         vm_eflags_t protoeflags;
1031         int must_drop = 0;
1032         vm_object_t object;
1033
1034         if (maptype == VM_MAPTYPE_UKSMAP)
1035                 object = NULL;
1036         else
1037                 object = map_object;
1038
1039         ASSERT_VM_MAP_LOCKED(map);
1040         if (object)
1041                 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
1042
1043         /*
1044          * Check that the start and end points are not bogus.
1045          */
1046         if ((start < map->min_offset) || (end > map->max_offset) ||
1047             (start >= end))
1048                 return (KERN_INVALID_ADDRESS);
1049
1050         /*
1051          * Find the entry prior to the proposed starting address; if it's part
1052          * of an existing entry, this range is bogus.
1053          */
1054         if (vm_map_lookup_entry(map, start, &temp_entry))
1055                 return (KERN_NO_SPACE);
1056
1057         prev_entry = temp_entry;
1058
1059         /*
1060          * Assert that the next entry doesn't overlap the end point.
1061          */
1062
1063         if ((prev_entry->next != &map->header) &&
1064             (prev_entry->next->start < end))
1065                 return (KERN_NO_SPACE);
1066
1067         protoeflags = 0;
1068
1069         if (cow & MAP_COPY_ON_WRITE)
1070                 protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY;
1071
1072         if (cow & MAP_NOFAULT) {
1073                 protoeflags |= MAP_ENTRY_NOFAULT;
1074
1075                 KASSERT(object == NULL,
1076                         ("vm_map_insert: paradoxical MAP_NOFAULT request"));
1077         }
1078         if (cow & MAP_DISABLE_SYNCER)
1079                 protoeflags |= MAP_ENTRY_NOSYNC;
1080         if (cow & MAP_DISABLE_COREDUMP)
1081                 protoeflags |= MAP_ENTRY_NOCOREDUMP;
1082         if (cow & MAP_IS_STACK)
1083                 protoeflags |= MAP_ENTRY_STACK;
1084         if (cow & MAP_IS_KSTACK)
1085                 protoeflags |= MAP_ENTRY_KSTACK;
1086
1087         lwkt_gettoken(&map->token);
1088
1089         if (object) {
1090                 /*
1091                  * When object is non-NULL, it could be shared with another
1092                  * process.  We have to set or clear OBJ_ONEMAPPING 
1093                  * appropriately.
1094                  *
1095                  * NOTE: This flag is only applicable to DEFAULT and SWAP
1096                  *       objects and will already be clear in other types
1097                  *       of objects, so a shared object lock is ok for
1098                  *       VNODE objects.
1099                  */
1100                 if ((object->ref_count > 1) || (object->shadow_count != 0)) {
1101                         vm_object_clear_flag(object, OBJ_ONEMAPPING);
1102                 }
1103         }
1104         else if ((prev_entry != &map->header) &&
1105                  (prev_entry->eflags == protoeflags) &&
1106                  (prev_entry->end == start) &&
1107                  (prev_entry->wired_count == 0) &&
1108                  (prev_entry->id == id) &&
1109                  prev_entry->maptype == maptype &&
1110                  maptype == VM_MAPTYPE_NORMAL &&
1111                  ((prev_entry->object.vm_object == NULL) ||
1112                   vm_object_coalesce(prev_entry->object.vm_object,
1113                                      OFF_TO_IDX(prev_entry->offset),
1114                                      (vm_size_t)(prev_entry->end - prev_entry->start),
1115                                      (vm_size_t)(end - prev_entry->end)))) {
1116                 /*
1117                  * We were able to extend the object.  Determine if we
1118                  * can extend the previous map entry to include the 
1119                  * new range as well.
1120                  */
1121                 if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
1122                     (prev_entry->protection == prot) &&
1123                     (prev_entry->max_protection == max)) {
1124                         map->size += (end - prev_entry->end);
1125                         prev_entry->end = end;
1126                         vm_map_simplify_entry(map, prev_entry, countp);
1127                         lwkt_reltoken(&map->token);
1128                         return (KERN_SUCCESS);
1129                 }
1130
1131                 /*
1132                  * If we can extend the object but cannot extend the
1133                  * map entry, we have to create a new map entry.  We
1134                  * must bump the ref count on the extended object to
1135                  * account for it.  object may be NULL.
1136                  *
1137                  * XXX if object is NULL should we set offset to 0 here ?
1138                  */
1139                 object = prev_entry->object.vm_object;
1140                 offset = prev_entry->offset +
1141                         (prev_entry->end - prev_entry->start);
1142                 if (object) {
1143                         vm_object_hold(object);
1144                         vm_object_chain_wait(object, 0);
1145                         vm_object_reference_locked(object);
1146                         must_drop = 1;
1147                         map_object = object;
1148                 }
1149         }
1150
1151         /*
1152          * NOTE: if conditionals fail, object can be NULL here.  This occurs
1153          * in things like the buffer map where we manage kva but do not manage
1154          * backing objects.
1155          */
1156
1157         /*
1158          * Create a new entry
1159          */
1160
1161         new_entry = vm_map_entry_create(map, countp);
1162         new_entry->start = start;
1163         new_entry->end = end;
1164         new_entry->id = id;
1165
1166         new_entry->maptype = maptype;
1167         new_entry->eflags = protoeflags;
1168         new_entry->object.map_object = map_object;
1169         new_entry->aux.master_pde = 0;          /* in case size is different */
1170         new_entry->aux.map_aux = map_aux;
1171         new_entry->offset = offset;
1172
1173         new_entry->inheritance = VM_INHERIT_DEFAULT;
1174         new_entry->protection = prot;
1175         new_entry->max_protection = max;
1176         new_entry->wired_count = 0;
1177
1178         /*
1179          * Insert the new entry into the list
1180          */
1181
1182         vm_map_entry_link(map, prev_entry, new_entry);
1183         map->size += new_entry->end - new_entry->start;
1184
1185         /*
1186          * Don't worry about updating freehint[] when inserting, allow
1187          * addresses to be lower than the actual first free spot.
1188          */
1189 #if 0
1190         /*
1191          * Temporarily removed to avoid MAP_STACK panic, due to
1192          * MAP_STACK being a huge hack.  Will be added back in
1193          * when MAP_STACK (and the user stack mapping) is fixed.
1194          */
1195         /*
1196          * It may be possible to simplify the entry
1197          */
1198         vm_map_simplify_entry(map, new_entry, countp);
1199 #endif
1200
1201         /*
1202          * Try to pre-populate the page table.  Mappings governed by virtual
1203          * page tables cannot be prepopulated without a lot of work, so
1204          * don't try.
1205          */
1206         if ((cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) &&
1207             maptype != VM_MAPTYPE_VPAGETABLE &&
1208             maptype != VM_MAPTYPE_UKSMAP) {
1209                 int dorelock = 0;
1210                 if (vm_map_relock_enable && (cow & MAP_PREFAULT_RELOCK)) {
1211                         dorelock = 1;
1212                         vm_object_lock_swap();
1213                         vm_object_drop(object);
1214                 }
1215                 pmap_object_init_pt(map->pmap, start, prot,
1216                                     object, OFF_TO_IDX(offset), end - start,
1217                                     cow & MAP_PREFAULT_PARTIAL);
1218                 if (dorelock) {
1219                         vm_object_hold(object);
1220                         vm_object_lock_swap();
1221                 }
1222         }
1223         if (must_drop)
1224                 vm_object_drop(object);
1225
1226         lwkt_reltoken(&map->token);
1227         return (KERN_SUCCESS);
1228 }
1229
1230 /*
1231  * Find sufficient space for `length' bytes in the given map, starting at
1232  * `start'.  Returns 0 on success, 1 on no space.
1233  *
1234  * This function will return an arbitrarily aligned pointer.  If no
1235  * particular alignment is required you should pass align as 1.  Note that
1236  * the map may return PAGE_SIZE aligned pointers if all the lengths used in
1237  * the map are a multiple of PAGE_SIZE, even if you pass a smaller align
1238  * argument.
1239  *
1240  * 'align' should be a power of 2 but is not required to be.
1241  *
1242  * The map must be exclusively locked.
1243  * No other requirements.
1244  */
1245 int
1246 vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length,
1247                  vm_size_t align, int flags, vm_offset_t *addr)
1248 {
1249         vm_map_entry_t entry, next;
1250         vm_map_entry_t tmp;
1251         vm_offset_t hole_start;
1252         vm_offset_t end;
1253         vm_offset_t align_mask;
1254
1255         if (start < map->min_offset)
1256                 start = map->min_offset;
1257         if (start > map->max_offset)
1258                 return (1);
1259
1260         /*
1261          * If the alignment is not a power of 2 we will have to use
1262          * a mod/division, so set align_mask to a special value.
1263          */
1264         if ((align | (align - 1)) + 1 != (align << 1))
1265                 align_mask = (vm_offset_t)-1;
1266         else
1267                 align_mask = align - 1;
1268
1269         /*
1270          * Use freehint to adjust the start point, hopefully reducing
1271          * the iteration to O(1).
1272          */
1273         hole_start = vm_map_freehint_find(map, length, align);
1274         if (start < hole_start)
1275                 start = hole_start;
1276         if (vm_map_lookup_entry(map, start, &tmp))
1277                 start = tmp->end;
1278         entry = tmp;
1279
1280         /*
1281          * Look through the rest of the map, trying to fit a new region in the
1282          * gap between existing regions, or after the very last region.
1283          */
1284         for (;; start = (entry = next)->end) {
1285                 /*
1286                  * Adjust the proposed start by the requested alignment and
1287                  * make sure that we didn't wrap the address.
1288                  */
1289                 if (align_mask == (vm_offset_t)-1)
1290                         end = roundup(start, align);
1291                 else
1292                         end = (start + align_mask) & ~align_mask;
1293                 if (end < start)
1294                         return (1);
1295                 start = end;
1296
1297                 /*
1298                  * Find the end of the proposed new region.  Be sure we didn't
1299                  * go beyond the end of the map, or wrap around the address.
1300                  * Then check to see if this is the last entry or if the 
1301                  * proposed end fits in the gap between this and the next
1302                  * entry.
1303                  */
1304                 end = start + length;
1305                 if (end > map->max_offset || end < start)
1306                         return (1);
1307                 next = entry->next;
1308
1309                 /*
1310                  * If the next entry's start address is beyond the desired
1311                  * end address we may have found a good entry.
1312                  *
1313                  * If the next entry is a stack mapping we do not map into
1314                  * the stack's reserved space.
1315                  *
1316                  * XXX continue to allow mapping into the stack's reserved
1317                  * space if doing a MAP_STACK mapping inside a MAP_STACK
1318                  * mapping, for backwards compatibility.  But the caller
1319                  * really should use MAP_STACK | MAP_TRYFIXED if they
1320                  * want to do that.
1321                  */
1322                 if (next == &map->header)
1323                         break;
1324                 if (next->start >= end) {
1325                         if ((next->eflags & MAP_ENTRY_STACK) == 0)
1326                                 break;
1327                         if (flags & MAP_STACK)
1328                                 break;
1329                         if (next->start - next->aux.avail_ssize >= end)
1330                                 break;
1331                 }
1332         }
1333
1334         /*
1335          * Update the freehint
1336          */
1337         vm_map_freehint_update(map, start, length, align);
1338
1339         /*
1340          * Grow the kernel_map if necessary.  pmap_growkernel() will panic
1341          * if it fails.  The kernel_map is locked and nothing can steal
1342          * our address space if pmap_growkernel() blocks.
1343          *
1344          * NOTE: This may be unconditionally called for kldload areas on
1345          *       x86_64 because these do not bump kernel_vm_end (which would
1346          *       fill 128G worth of page tables!).  Therefore we must not
1347          *       retry.
1348          */
1349         if (map == &kernel_map) {
1350                 vm_offset_t kstop;
1351
1352                 kstop = round_page(start + length);
1353                 if (kstop > kernel_vm_end)
1354                         pmap_growkernel(start, kstop);
1355         }
1356         *addr = start;
1357         return (0);
1358 }
1359
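/*
 * Worked example of the alignment handling above (illustrative): for
 * align = 8, (8 | 7) + 1 == 16 == (8 << 1), so align is a power of 2 and
 * align_mask = 7; a start of 0x1234 rounds to (0x1234 + 7) & ~7 = 0x1238.
 * For align = 12, (12 | 11) + 1 == 16 != 24, so the roundup() path is
 * used instead.
 */
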
1360 /*
1361  * vm_map_find finds an unallocated region in the target address map with
1362  * the given length and allocates it.  The search is defined to be first-fit
1363  * from the specified address; the region found is returned in the same
1364  * parameter.
1365  *
1366  * If object is non-NULL, ref count must be bumped by caller
1367  * prior to making call to account for the new entry.
1368  *
1369  * No requirements.  This function will lock the map temporarily.
1370  */
1371 int
1372 vm_map_find(vm_map_t map, void *map_object, void *map_aux,
1373             vm_ooffset_t offset, vm_offset_t *addr,
1374             vm_size_t length, vm_size_t align, boolean_t fitit,
1375             vm_maptype_t maptype, vm_subsys_t id,
1376             vm_prot_t prot, vm_prot_t max, int cow)
1377 {
1378         vm_offset_t start;
1379         vm_object_t object;
1380         int result;
1381         int count;
1382
1383         if (maptype == VM_MAPTYPE_UKSMAP)
1384                 object = NULL;
1385         else
1386                 object = map_object;
1387
1388         start = *addr;
1389
1390         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1391         vm_map_lock(map);
1392         if (object)
1393                 vm_object_hold_shared(object);
1394         if (fitit) {
1395                 if (vm_map_findspace(map, start, length, align, 0, addr)) {
1396                         if (object)
1397                                 vm_object_drop(object);
1398                         vm_map_unlock(map);
1399                         vm_map_entry_release(count);
1400                         return (KERN_NO_SPACE);
1401                 }
1402                 start = *addr;
1403         }
1404         result = vm_map_insert(map, &count, map_object, map_aux,
1405                                offset, start, start + length,
1406                                maptype, id, prot, max, cow);
1407         if (object)
1408                 vm_object_drop(object);
1409         vm_map_unlock(map);
1410         vm_map_entry_release(count);
1411
1412         return (result);
1413 }
1414
1415 /*
1416  * Simplify the given map entry by merging with either neighbor.  This
1417  * routine also has the ability to merge with both neighbors.
1418  *
1419  * This routine guarantees that the passed entry remains valid (though
1420  * possibly extended).  When merging, this routine may delete one or
1421  * both neighbors.  No action is taken on entries which have their
1422  * in-transition flag set.
1423  *
1424  * The map must be exclusively locked.
1425  */
1426 void
1427 vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry, int *countp)
1428 {
1429         vm_map_entry_t next, prev;
1430         vm_size_t prevsize, esize;
1431
1432         if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
1433                 ++mycpu->gd_cnt.v_intrans_coll;
1434                 return;
1435         }
1436
1437         if (entry->maptype == VM_MAPTYPE_SUBMAP)
1438                 return;
1439         if (entry->maptype == VM_MAPTYPE_UKSMAP)
1440                 return;
1441
1442         prev = entry->prev;
1443         if (prev != &map->header) {
1444                 prevsize = prev->end - prev->start;
1445                 if ( (prev->end == entry->start) &&
1446                      (prev->maptype == entry->maptype) &&
1447                      (prev->object.vm_object == entry->object.vm_object) &&
1448                      (!prev->object.vm_object ||
1449                         (prev->offset + prevsize == entry->offset)) &&
1450                      (prev->eflags == entry->eflags) &&
1451                      (prev->protection == entry->protection) &&
1452                      (prev->max_protection == entry->max_protection) &&
1453                      (prev->inheritance == entry->inheritance) &&
1454                      (prev->id == entry->id) &&
1455                      (prev->wired_count == entry->wired_count)) {
1456                         vm_map_entry_unlink(map, prev);
1457                         entry->start = prev->start;
1458                         entry->offset = prev->offset;
1459                         if (prev->object.vm_object)
1460                                 vm_object_deallocate(prev->object.vm_object);
1461                         vm_map_entry_dispose(map, prev, countp);
1462                 }
1463         }
1464
1465         next = entry->next;
1466         if (next != &map->header) {
1467                 esize = entry->end - entry->start;
1468                 if ((entry->end == next->start) &&
1469                     (next->maptype == entry->maptype) &&
1470                     (next->object.vm_object == entry->object.vm_object) &&
1471                      (!entry->object.vm_object ||
1472                         (entry->offset + esize == next->offset)) &&
1473                     (next->eflags == entry->eflags) &&
1474                     (next->protection == entry->protection) &&
1475                     (next->max_protection == entry->max_protection) &&
1476                     (next->inheritance == entry->inheritance) &&
1477                     (next->id == entry->id) &&
1478                     (next->wired_count == entry->wired_count)) {
1479                         vm_map_entry_unlink(map, next);
1480                         entry->end = next->end;
1481                         if (next->object.vm_object)
1482                                 vm_object_deallocate(next->object.vm_object);
1483                         vm_map_entry_dispose(map, next, countp);
1484                 }
1485         }
1486 }
1487
1488 /*
1489  * Asserts that the given entry begins at or after the specified address.
1490  * If necessary, it splits the entry into two.
1491  */
1492 #define vm_map_clip_start(map, entry, startaddr, countp)                \
1493 {                                                                       \
1494         if (startaddr > entry->start)                                   \
1495                 _vm_map_clip_start(map, entry, startaddr, countp);      \
1496 }
1497
1498 /*
1499  * This routine is called only when it is known that the entry must be split.
1500  *
1501  * The map must be exclusively locked.
1502  */
1503 static void
1504 _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start,
1505                    int *countp)
1506 {
1507         vm_map_entry_t new_entry;
1508
1509         /*
1510          * Split off the front portion -- note that we must insert the new
1511          * entry BEFORE this one, so that this entry has the specified
1512          * starting address.
1513          */
1514
1515         vm_map_simplify_entry(map, entry, countp);
1516
1517         /*
1518          * If there is no object backing this entry, we might as well create
1519          * one now.  If we defer it, an object can get created after the map
1520          * is clipped, and individual objects will be created for the split-up
1521          * map.  This is a bit of a hack, but is also about the best place to
1522          * put this improvement.
1523          */
1524         if (entry->object.vm_object == NULL && !map->system_map) {
1525                 vm_map_entry_allocate_object(entry);
1526         }
1527
1528         new_entry = vm_map_entry_create(map, countp);
1529         *new_entry = *entry;
1530
1531         new_entry->end = start;
1532         entry->offset += (start - entry->start);
1533         entry->start = start;
1534
1535         vm_map_entry_link(map, entry->prev, new_entry);
1536
1537         switch(entry->maptype) {
1538         case VM_MAPTYPE_NORMAL:
1539         case VM_MAPTYPE_VPAGETABLE:
1540                 if (new_entry->object.vm_object) {
1541                         vm_object_hold(new_entry->object.vm_object);
1542                         vm_object_chain_wait(new_entry->object.vm_object, 0);
1543                         vm_object_reference_locked(new_entry->object.vm_object);
1544                         vm_object_drop(new_entry->object.vm_object);
1545                 }
1546                 break;
1547         default:
1548                 break;
1549         }
1550 }
1551
1552 /*
1553  * Asserts that the given entry ends at or before the specified address.
1554  * If necessary, it splits the entry into two.
1555  *
1556  * The map must be exclusively locked.
1557  */
1558 #define vm_map_clip_end(map, entry, endaddr, countp)            \
1559 {                                                               \
1560         if (endaddr < entry->end)                               \
1561                 _vm_map_clip_end(map, entry, endaddr, countp);  \
1562 }
1563
1564 /*
1565  * This routine is called only when it is known that the entry must be split.
1566  *
1567  * The map must be exclusively locked.
1568  */
1569 static void
1570 _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end,
1571                  int *countp)
1572 {
1573         vm_map_entry_t new_entry;
1574
1575         /*
1576          * If there is no object backing this entry, we might as well create
1577          * one now.  If we defer it, an object can get created after the map
1578          * is clipped, and individual objects will be created for the split-up
1579          * map.  This is a bit of a hack, but is also about the best place to
1580          * put this improvement.
1581          */
1582
1583         if (entry->object.vm_object == NULL && !map->system_map) {
1584                 vm_map_entry_allocate_object(entry);
1585         }
1586
1587         /*
1588          * Create a new entry and insert it AFTER the specified entry
1589          */
1590
1591         new_entry = vm_map_entry_create(map, countp);
1592         *new_entry = *entry;
1593
1594         new_entry->start = entry->end = end;
1595         new_entry->offset += (end - entry->start);
1596
1597         vm_map_entry_link(map, entry, new_entry);
1598
1599         switch(entry->maptype) {
1600         case VM_MAPTYPE_NORMAL:
1601         case VM_MAPTYPE_VPAGETABLE:
1602                 if (new_entry->object.vm_object) {
1603                         vm_object_hold(new_entry->object.vm_object);
1604                         vm_object_chain_wait(new_entry->object.vm_object, 0);
1605                         vm_object_reference_locked(new_entry->object.vm_object);
1606                         vm_object_drop(new_entry->object.vm_object);
1607                 }
1608                 break;
1609         default:
1610                 break;
1611         }
1612 }
1613
1614 /*
1615  * Asserts that the starting and ending region addresses fall within the
1616  * valid range for the map.
1617  */
1618 #define VM_MAP_RANGE_CHECK(map, start, end)     \
1619 {                                               \
1620         if (start < vm_map_min(map))            \
1621                 start = vm_map_min(map);        \
1622         if (end > vm_map_max(map))              \
1623                 end = vm_map_max(map);          \
1624         if (start > end)                        \
1625                 start = end;                    \
1626 }
1627
1628 /*
1629  * Used to block when an in-transition collision occurs.  The map
1630  * is unlocked for the sleep and relocked before the return.
1631  */
1632 void
1633 vm_map_transition_wait(vm_map_t map)
1634 {
1635         tsleep_interlock(map, 0);
1636         vm_map_unlock(map);
1637         tsleep(map, PINTERLOCKED, "vment", 0);
1638         vm_map_lock(map);
1639 }
1640
1641 /*
1642  * When we do blocking operations with the map lock held it is
1643  * possible that a clip might have occurred on our in-transit entry,
1644  * requiring an adjustment to the entry in our loop.  These macros
1645  * help the pageable and clip_range code deal with the case.  The
1646  * conditional costs virtually nothing if no clipping has occurred.
1647  */
1648
1649 #define CLIP_CHECK_BACK(entry, save_start)              \
1650     do {                                                \
1651             while (entry->start != save_start) {        \
1652                     entry = entry->prev;                \
1653                     KASSERT(entry != &map->header, ("bad entry clip")); \
1654             }                                           \
1655     } while(0)
1656
1657 #define CLIP_CHECK_FWD(entry, save_end)                 \
1658     do {                                                \
1659             while (entry->end != save_end) {            \
1660                     entry = entry->next;                \
1661                     KASSERT(entry != &map->header, ("bad entry clip")); \
1662             }                                           \
1663     } while(0)
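
/*
 * Illustrative sketch (hypothetical helper, not part of the VM API): the
 * typical save/recheck pattern the macros above support.  An entry boundary
 * is recorded before a call that may temporarily release the map lock;
 * afterwards CLIP_CHECK_FWD() walks forward to the clip fragment whose end
 * still matches the saved value.
 */
static vm_map_entry_t
example_block_and_recheck(vm_map_t map, vm_map_entry_t entry)
{
	vm_offset_t save_end = entry->end;

	/* may unlock and relock the map, allowing clips by other threads */
	vm_map_transition_wait(map);

	/* resume at the clip fragment whose end matches what we saved */
	CLIP_CHECK_FWD(entry, save_end);
	return (entry);
}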
1664
1665
1666 /*
1667  * Clip the specified range and return the base entry.  The
1668  * range may cover several entries starting at the returned base
1669  * and the first and last entry in the covering sequence will be
1670  * properly clipped to the requested start and end address.
1671  *
1672  * If no holes are allowed you should pass the MAP_CLIP_NO_HOLES
1673  * flag.
1674  *
1675  * The MAP_ENTRY_IN_TRANSITION flag will be set for the entries
1676  * covered by the requested range.
1677  *
1678  * The map must be exclusively locked on entry and will remain locked
1679  * on return. If no range exists or the range contains holes and you
1680  * specified that no holes were allowed, NULL will be returned.  This
1681  * routine may temporarily unlock the map in order to avoid a deadlock when
1682  * sleeping.
1683  */
1684 static
1685 vm_map_entry_t
1686 vm_map_clip_range(vm_map_t map, vm_offset_t start, vm_offset_t end, 
1687                   int *countp, int flags)
1688 {
1689         vm_map_entry_t start_entry;
1690         vm_map_entry_t entry;
1691
1692         /*
1693          * Locate the entry and effect initial clipping.  The in-transition
1694          * case does not occur very often so do not try to optimize it.
1695          */
1696 again:
1697         if (vm_map_lookup_entry(map, start, &start_entry) == FALSE)
1698                 return (NULL);
1699         entry = start_entry;
1700         if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
1701                 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
1702                 ++mycpu->gd_cnt.v_intrans_coll;
1703                 ++mycpu->gd_cnt.v_intrans_wait;
1704                 vm_map_transition_wait(map);
1705                 /*
1706                  * entry and/or start_entry may have been clipped while
1707                  * we slept, or may have gone away entirely.  We have
1708                  * to restart from the lookup.
1709                  */
1710                 goto again;
1711         }
1712
1713         /*
1714          * Since we hold an exclusive map lock we do not have to restart
1715          * after clipping, even though clipping may block in zalloc.
1716          */
1717         vm_map_clip_start(map, entry, start, countp);
1718         vm_map_clip_end(map, entry, end, countp);
1719         entry->eflags |= MAP_ENTRY_IN_TRANSITION;
1720
1721         /*
1722          * Scan entries covered by the range.  When working on the next
1723          * entry a restart need only re-loop on the current entry which
1724          * we have already locked, since 'next' may have changed.  Also,
1725          * even though entry is safe, it may have been clipped so we
1726          * have to iterate forwards through the clip after sleeping.
1727          */
1728         while (entry->next != &map->header && entry->next->start < end) {
1729                 vm_map_entry_t next = entry->next;
1730
1731                 if (flags & MAP_CLIP_NO_HOLES) {
1732                         if (next->start > entry->end) {
1733                                 vm_map_unclip_range(map, start_entry,
1734                                         start, entry->end, countp, flags);
1735                                 return(NULL);
1736                         }
1737                 }
1738
1739                 if (next->eflags & MAP_ENTRY_IN_TRANSITION) {
1740                         vm_offset_t save_end = entry->end;
1741                         next->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
1742                         ++mycpu->gd_cnt.v_intrans_coll;
1743                         ++mycpu->gd_cnt.v_intrans_wait;
1744                         vm_map_transition_wait(map);
1745
1746                         /*
1747                          * clips might have occurred while we blocked.
1748                          */
1749                         CLIP_CHECK_FWD(entry, save_end);
1750                         CLIP_CHECK_BACK(start_entry, start);
1751                         continue;
1752                 }
1753                 /*
1754                  * No restart necessary even though clip_end may block, we
1755                  * are holding the map lock.
1756                  */
1757                 vm_map_clip_end(map, next, end, countp);
1758                 next->eflags |= MAP_ENTRY_IN_TRANSITION;
1759                 entry = next;
1760         }
1761         if (flags & MAP_CLIP_NO_HOLES) {
1762                 if (entry->end != end) {
1763                         vm_map_unclip_range(map, start_entry,
1764                                 start, entry->end, countp, flags);
1765                         return(NULL);
1766                 }
1767         }
1768         return(start_entry);
1769 }
1770
1771 /*
1772  * Undo the effect of vm_map_clip_range().  You should pass the same
1773  * flags and the same range that you passed to vm_map_clip_range().
1774  * This code will clear the in-transition flag on the entries and
1775  * wake up anyone waiting.  This code will also simplify the sequence
1776  * and attempt to merge it with entries before and after the sequence.
1777  *
1778  * The map must be locked on entry and will remain locked on return.
1779  *
1780  * Note that you should also pass the start_entry returned by
1781  * vm_map_clip_range().  However, if you block between the two calls
1782  * with the map unlocked please be aware that the start_entry may
1783  * have been clipped and you may need to scan it backwards to find
1784  * the entry corresponding with the original start address.  You are
1785  * responsible for this, vm_map_unclip_range() expects the correct
1786  * start_entry to be passed to it and will KASSERT otherwise.
1787  */
1788 static
1789 void
1790 vm_map_unclip_range(vm_map_t map, vm_map_entry_t start_entry,
1791                     vm_offset_t start, vm_offset_t end,
1792                     int *countp, int flags)
1793 {
1794         vm_map_entry_t entry;
1795
1796         entry = start_entry;
1797
1798         KASSERT(entry->start == start, ("unclip_range: illegal base entry"));
1799         while (entry != &map->header && entry->start < end) {
1800                 KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION,
1801                         ("in-transition flag not set during unclip on: %p",
1802                         entry));
1803                 KASSERT(entry->end <= end,
1804                         ("unclip_range: tail wasn't clipped"));
1805                 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
1806                 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
1807                         entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
1808                         wakeup(map);
1809                 }
1810                 entry = entry->next;
1811         }
1812
1813         /*
1814          * Simplification does not block so there is no restart case.
1815          */
1816         entry = start_entry;
1817         while (entry != &map->header && entry->start < end) {
1818                 vm_map_simplify_entry(map, entry, countp);
1819                 entry = entry->next;
1820         }
1821 }
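
/*
 * Illustrative sketch (hypothetical helper, not part of the VM API): the
 * clip/unclip protocol described above.  The caller reserves map entries,
 * takes the exclusive map lock, clips the range with no holes allowed,
 * operates on each covered (in-transition) entry, and then unclips using
 * the same range and flags before releasing the lock and the reservation.
 */
static int
example_operate_on_range(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	vm_map_entry_t base;
	vm_map_entry_t entry;
	int count;

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	vm_map_lock(map);

	base = vm_map_clip_range(map, start, end, &count, MAP_CLIP_NO_HOLES);
	if (base == NULL) {
		vm_map_unlock(map);
		vm_map_entry_release(count);
		return (KERN_INVALID_ADDRESS);
	}

	/* every entry covering [start, end) is now marked IN_TRANSITION */
	for (entry = base;
	     entry != &map->header && entry->start < end;
	     entry = entry->next) {
		/* ... operate on the clipped entry here ... */
	}

	vm_map_unclip_range(map, base, start, end, &count, MAP_CLIP_NO_HOLES);
	vm_map_unlock(map);
	vm_map_entry_release(count);
	return (KERN_SUCCESS);
}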
1822
1823 /*
1824  * Mark the given range as handled by a subordinate map.
1825  *
1826  * This range must have been created with vm_map_find(), and no other
1827  * operations may have been performed on this range prior to calling
1828  * vm_map_submap().
1829  *
1830  * Submappings cannot be removed.
1831  *
1832  * No requirements.
1833  */
1834 int
1835 vm_map_submap(vm_map_t map, vm_offset_t start, vm_offset_t end, vm_map_t submap)
1836 {
1837         vm_map_entry_t entry;
1838         int result = KERN_INVALID_ARGUMENT;
1839         int count;
1840
1841         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1842         vm_map_lock(map);
1843
1844         VM_MAP_RANGE_CHECK(map, start, end);
1845
1846         if (vm_map_lookup_entry(map, start, &entry)) {
1847                 vm_map_clip_start(map, entry, start, &count);
1848         } else {
1849                 entry = entry->next;
1850         }
1851
1852         vm_map_clip_end(map, entry, end, &count);
1853
1854         if ((entry->start == start) && (entry->end == end) &&
1855             ((entry->eflags & MAP_ENTRY_COW) == 0) &&
1856             (entry->object.vm_object == NULL)) {
1857                 entry->object.sub_map = submap;
1858                 entry->maptype = VM_MAPTYPE_SUBMAP;
1859                 result = KERN_SUCCESS;
1860         }
1861         vm_map_unlock(map);
1862         vm_map_entry_release(count);
1863
1864         return (result);
1865 }
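
/*
 * Illustrative sketch (hypothetical helper, not part of the VM API):
 * installing a subordinate map.  The range must exactly cover a single
 * entry previously created with vm_map_find() and not otherwise touched,
 * or KERN_INVALID_ARGUMENT is returned.
 */
static int
example_install_submap(vm_map_t map, vm_offset_t start, vm_offset_t end,
		       vm_map_t submap)
{
	return (vm_map_submap(map, start, end, submap));
}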
1866
1867 /*
1868  * Sets the protection of the specified address region in the target map. 
1869  * If "set_max" is specified, the maximum protection is to be set;
1870  * otherwise, only the current protection is affected.
1871  *
1872  * The protection is not applicable to submaps, but is applicable to normal
1873  * maps and maps governed by virtual page tables.  For example, when operating
1874  * on a virtual page table our protection basically controls how COW occurs
1875  * on the backing object, whereas the virtual page table itself is an
1876  * abstraction for userland.
1877  *
1878  * No requirements.
1879  */
1880 int
1881 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
1882                vm_prot_t new_prot, boolean_t set_max)
1883 {
1884         vm_map_entry_t current;
1885         vm_map_entry_t entry;
1886         int count;
1887
1888         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1889         vm_map_lock(map);
1890
1891         VM_MAP_RANGE_CHECK(map, start, end);
1892
1893         if (vm_map_lookup_entry(map, start, &entry)) {
1894                 vm_map_clip_start(map, entry, start, &count);
1895         } else {
1896                 entry = entry->next;
1897         }
1898
1899         /*
1900          * Make a first pass to check for protection violations.
1901          */
1902         current = entry;
1903         while ((current != &map->header) && (current->start < end)) {
1904                 if (current->maptype == VM_MAPTYPE_SUBMAP) {
1905                         vm_map_unlock(map);
1906                         vm_map_entry_release(count);
1907                         return (KERN_INVALID_ARGUMENT);
1908                 }
1909                 if ((new_prot & current->max_protection) != new_prot) {
1910                         vm_map_unlock(map);
1911                         vm_map_entry_release(count);
1912                         return (KERN_PROTECTION_FAILURE);
1913                 }
1914                 current = current->next;
1915         }
1916
1917         /*
1918          * Go back and fix up protections. [Note that clipping is not
1919          * necessary the second time.]
1920          */
1921         current = entry;
1922
1923         while ((current != &map->header) && (current->start < end)) {
1924                 vm_prot_t old_prot;
1925
1926                 vm_map_clip_end(map, current, end, &count);
1927
1928                 old_prot = current->protection;
1929                 if (set_max) {
1930                         current->max_protection = new_prot;
1931                         current->protection = new_prot & old_prot;
1932                 } else {
1933                         current->protection = new_prot;
1934                 }
1935
1936                 /*
1937                  * Update physical map if necessary. Worry about copy-on-write
1938                  * here -- CHECK THIS XXX
1939                  */
1940
1941                 if (current->protection != old_prot) {
1942 #define MASK(entry)     (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
1943                                                         VM_PROT_ALL)
1944
1945                         pmap_protect(map->pmap, current->start,
1946                             current->end,
1947                             current->protection & MASK(current));
1948 #undef  MASK
1949                 }
1950
1951                 vm_map_simplify_entry(map, current, &count);
1952
1953                 current = current->next;
1954         }
1955
1956         vm_map_unlock(map);
1957         vm_map_entry_release(count);
1958         return (KERN_SUCCESS);
1959 }
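
/*
 * Illustrative sketch (hypothetical helper, not part of the VM API): the two
 * vm_map_protect() modes.  With set_max FALSE only the current protection is
 * replaced; with set_max TRUE the maximum protection is replaced and the
 * current protection is clipped against the new maximum.
 */
static int
example_make_readonly(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	int rv;

	/* drop write permission now, max_protection is left alone */
	rv = vm_map_protect(map, start, end, VM_PROT_READ, FALSE);
	if (rv != KERN_SUCCESS)
		return (rv);

	/* also cap the maximum so the range can never become writable */
	return (vm_map_protect(map, start, end, VM_PROT_READ, TRUE));
}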
1960
1961 /*
1962  * This routine traverses a process's map handling the madvise
1963  * system call.  Advisories are classified as either those affecting
1964  * the vm_map_entry structure, or those affecting the underlying
1965  * objects.
1966  *
1967  * The <value> argument is used for extended madvise calls.
1968  *
1969  * No requirements.
1970  */
1971 int
1972 vm_map_madvise(vm_map_t map, vm_offset_t start, vm_offset_t end,
1973                int behav, off_t value)
1974 {
1975         vm_map_entry_t current, entry;
1976         int modify_map = 0;
1977         int error = 0;
1978         int count;
1979
1980         /*
1981          * Some madvise calls directly modify the vm_map_entry, in which case
1982          * we need to use an exclusive lock on the map and we need to perform 
1983          * various clipping operations.  Otherwise we only need a read-lock
1984          * on the map.
1985          */
1986         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1987
1988         switch(behav) {
1989         case MADV_NORMAL:
1990         case MADV_SEQUENTIAL:
1991         case MADV_RANDOM:
1992         case MADV_NOSYNC:
1993         case MADV_AUTOSYNC:
1994         case MADV_NOCORE:
1995         case MADV_CORE:
1996         case MADV_SETMAP:
1997                 modify_map = 1;
1998                 vm_map_lock(map);
1999                 break;
2000         case MADV_INVAL:
2001         case MADV_WILLNEED:
2002         case MADV_DONTNEED:
2003         case MADV_FREE:
2004                 vm_map_lock_read(map);
2005                 break;
2006         default:
2007                 vm_map_entry_release(count);
2008                 return (EINVAL);
2009         }
2010
2011         /*
2012          * Locate starting entry and clip if necessary.
2013          */
2014
2015         VM_MAP_RANGE_CHECK(map, start, end);
2016
2017         if (vm_map_lookup_entry(map, start, &entry)) {
2018                 if (modify_map)
2019                         vm_map_clip_start(map, entry, start, &count);
2020         } else {
2021                 entry = entry->next;
2022         }
2023
2024         if (modify_map) {
2025                 /*
2026                  * madvise behaviors that are implemented in the vm_map_entry.
2027                  *
2028                  * We clip the vm_map_entry so that behavioral changes are
2029                  * limited to the specified address range.
2030                  */
2031                 for (current = entry;
2032                      (current != &map->header) && (current->start < end);
2033                      current = current->next
2034                 ) {
2035                         if (current->maptype == VM_MAPTYPE_SUBMAP)
2036                                 continue;
2037
2038                         vm_map_clip_end(map, current, end, &count);
2039
2040                         switch (behav) {
2041                         case MADV_NORMAL:
2042                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
2043                                 break;
2044                         case MADV_SEQUENTIAL:
2045                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
2046                                 break;
2047                         case MADV_RANDOM:
2048                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
2049                                 break;
2050                         case MADV_NOSYNC:
2051                                 current->eflags |= MAP_ENTRY_NOSYNC;
2052                                 break;
2053                         case MADV_AUTOSYNC:
2054                                 current->eflags &= ~MAP_ENTRY_NOSYNC;
2055                                 break;
2056                         case MADV_NOCORE:
2057                                 current->eflags |= MAP_ENTRY_NOCOREDUMP;
2058                                 break;
2059                         case MADV_CORE:
2060                                 current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
2061                                 break;
2062                         case MADV_SETMAP:
2063                                 /*
2064                                  * Set the page directory page for a map
2065                                  * governed by a virtual page table.  Mark
2066                                  * the entry as being governed by a virtual
2067                                  * page table if it is not.
2068                                  *
2069                                  * XXX the page directory page is stored
2070                                  * in the avail_ssize field of the map_entry.
2071                                  *
2072                                  * XXX the map simplification code does not
2073                                  * compare this field so weird things may
2074                                  * happen if you do not apply this function
2075                                  * to the entire mapping governed by the
2076                                  * virtual page table.
2077                                  */
2078                                 if (current->maptype != VM_MAPTYPE_VPAGETABLE) {
2079                                         error = EINVAL;
2080                                         break;
2081                                 }
2082                                 current->aux.master_pde = value;
2083                                 pmap_remove(map->pmap,
2084                                             current->start, current->end);
2085                                 break;
2086                         case MADV_INVAL:
2087                                 /*
2088                                  * Invalidate the related pmap entries, used
2089                                  * to flush portions of the real kernel's
2090                                  * pmap when the caller has removed or
2091                                  * modified existing mappings in a virtual
2092                                  * page table.
2093                                  *
2094                                  * (exclusive locked map version does not
2095                                  * need the range interlock).
2096                                  */
2097                                 pmap_remove(map->pmap,
2098                                             current->start, current->end);
2099                                 break;
2100                         default:
2101                                 error = EINVAL;
2102                                 break;
2103                         }
2104                         vm_map_simplify_entry(map, current, &count);
2105                 }
2106                 vm_map_unlock(map);
2107         } else {
2108                 vm_pindex_t pindex;
2109                 vm_pindex_t delta;
2110
2111                 /*
2112                  * madvise behaviors that are implemented in the underlying
2113                  * vm_object.
2114                  *
2115                  * Since we don't clip the vm_map_entry, we have to clip
2116                  * the vm_object pindex and count.
2117                  *
2118                  * NOTE!  These functions are only supported on normal maps,
2119                  *        except MADV_INVAL which is also supported on
2120                  *        virtual page tables.
2121                  */
2122                 for (current = entry;
2123                      (current != &map->header) && (current->start < end);
2124                      current = current->next
2125                 ) {
2126                         vm_offset_t useStart;
2127
2128                         if (current->maptype != VM_MAPTYPE_NORMAL &&
2129                             (current->maptype != VM_MAPTYPE_VPAGETABLE ||
2130                              behav != MADV_INVAL)) {
2131                                 continue;
2132                         }
2133
2134                         pindex = OFF_TO_IDX(current->offset);
2135                         delta = atop(current->end - current->start);
2136                         useStart = current->start;
2137
2138                         if (current->start < start) {
2139                                 pindex += atop(start - current->start);
2140                                 delta -= atop(start - current->start);
2141                                 useStart = start;
2142                         }
2143                         if (current->end > end)
2144                                 delta -= atop(current->end - end);
2145
2146                         if ((vm_spindex_t)delta <= 0)
2147                                 continue;
2148
2149                         if (behav == MADV_INVAL) {
2150                                 /*
2151                                  * Invalidate the related pmap entries, used
2152                                  * to flush portions of the real kernel's
2153                                  * pmap when the caller has removed or
2154                                  * modified existing mappings in a virtual
2155                                  * page table.
2156                                  *
2157                                  * (shared locked map version needs the
2158                                  * interlock, see vm_fault()).
2159                                  */
2160                                 struct vm_map_ilock ilock;
2161
2162                                 KASSERT(useStart >= VM_MIN_USER_ADDRESS &&
2163                                             useStart + ptoa(delta) <=
2164                                             VM_MAX_USER_ADDRESS,
2165                                          ("Bad range %016jx-%016jx (%016jx)",
2166                                          useStart, useStart + ptoa(delta),
2167                                          delta));
2168                                 vm_map_interlock(map, &ilock,
2169                                                  useStart,
2170                                                  useStart + ptoa(delta));
2171                                 pmap_remove(map->pmap,
2172                                             useStart,
2173                                             useStart + ptoa(delta));
2174                                 vm_map_deinterlock(map, &ilock);
2175                         } else {
2176                                 vm_object_madvise(current->object.vm_object,
2177                                                   pindex, delta, behav);
2178                         }
2179
2180                         /*
2181                          * Try to populate the page table.  Mappings governed
2182                          * by virtual page tables cannot be pre-populated
2183                          * without a lot of work so don't try.
2184                          */
2185                         if (behav == MADV_WILLNEED &&
2186                             current->maptype != VM_MAPTYPE_VPAGETABLE) {
2187                                 pmap_object_init_pt(
2188                                     map->pmap, 
2189                                     useStart,
2190                                     current->protection,
2191                                     current->object.vm_object,
2192                                     pindex, 
2193                                     (delta << PAGE_SHIFT),
2194                                     MAP_PREFAULT_MADVISE
2195                                 );
2196                         }
2197                 }
2198                 vm_map_unlock_read(map);
2199         }
2200         vm_map_entry_release(count);
2201         return(error);
2202 }       
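
/*
 * Illustrative sketch (hypothetical helper, not part of the VM API): the two
 * classes of advice handled above.  MADV_WILLNEED only touches the backing
 * object (shared map lock), while MADV_NOCORE modifies the vm_map_entry
 * itself (exclusive map lock plus clipping).  Both return 0 or an errno.
 */
static int
example_madvise_range(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	int error;

	/* object-level advice: pre-populate the backing pages if possible */
	error = vm_map_madvise(map, start, end, MADV_WILLNEED, 0);
	if (error)
		return (error);

	/* entry-level advice: exclude the range from core dumps */
	return (vm_map_madvise(map, start, end, MADV_NOCORE, 0));
}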
2203
2204
2205 /*
2206  * Sets the inheritance of the specified address range in the target map.
2207  * Inheritance affects how the map will be shared with child maps at the
2208  * time of vm_map_fork.
2209  */
2210 int
2211 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
2212                vm_inherit_t new_inheritance)
2213 {
2214         vm_map_entry_t entry;
2215         vm_map_entry_t temp_entry;
2216         int count;
2217
2218         switch (new_inheritance) {
2219         case VM_INHERIT_NONE:
2220         case VM_INHERIT_COPY:
2221         case VM_INHERIT_SHARE:
2222                 break;
2223         default:
2224                 return (KERN_INVALID_ARGUMENT);
2225         }
2226
2227         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
2228         vm_map_lock(map);
2229
2230         VM_MAP_RANGE_CHECK(map, start, end);
2231
2232         if (vm_map_lookup_entry(map, start, &temp_entry)) {
2233                 entry = temp_entry;
2234                 vm_map_clip_start(map, entry, start, &count);
2235         } else
2236                 entry = temp_entry->next;
2237
2238         while ((entry != &map->header) && (entry->start < end)) {
2239                 vm_map_clip_end(map, entry, end, &count);
2240
2241                 entry->inheritance = new_inheritance;
2242
2243                 vm_map_simplify_entry(map, entry, &count);
2244
2245                 entry = entry->next;
2246         }
2247         vm_map_unlock(map);
2248         vm_map_entry_release(count);
2249         return (KERN_SUCCESS);
2250 }
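
/*
 * Illustrative sketch (hypothetical helper, not part of the VM API): marking
 * a range so that a child created at fork time shares the mapping with the
 * parent instead of receiving a copy-on-write copy.
 */
static int
example_share_on_fork(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	return (vm_map_inherit(map, start, end, VM_INHERIT_SHARE));
}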
2251
2252 /*
2253  * Implement the semantics of mlock
2254  */
2255 int
2256 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t real_end,
2257               boolean_t new_pageable)
2258 {
2259         vm_map_entry_t entry;
2260         vm_map_entry_t start_entry;
2261         vm_offset_t end;
2262         int rv = KERN_SUCCESS;
2263         int count;
2264
2265         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
2266         vm_map_lock(map);
2267         VM_MAP_RANGE_CHECK(map, start, real_end);
2268         end = real_end;
2269
2270         start_entry = vm_map_clip_range(map, start, end, &count,
2271                                         MAP_CLIP_NO_HOLES);
2272         if (start_entry == NULL) {
2273                 vm_map_unlock(map);
2274                 vm_map_entry_release(count);
2275                 return (KERN_INVALID_ADDRESS);
2276         }
2277
2278         if (new_pageable == 0) {
2279                 entry = start_entry;
2280                 while ((entry != &map->header) && (entry->start < end)) {
2281                         vm_offset_t save_start;
2282                         vm_offset_t save_end;
2283
2284                         /*
2285                          * Already user wired or hard wired (trivial cases)
2286                          */
2287                         if (entry->eflags & MAP_ENTRY_USER_WIRED) {
2288                                 entry = entry->next;
2289                                 continue;
2290                         }
2291                         if (entry->wired_count != 0) {
2292                                 entry->wired_count++;
2293                                 entry->eflags |= MAP_ENTRY_USER_WIRED;
2294                                 entry = entry->next;
2295                                 continue;
2296                         }
2297
2298                         /*
2299                          * A new wiring requires instantiation of appropriate
2300                          * management structures and the faulting in of the
2301                          * page.
2302                          */
2303                         if (entry->maptype == VM_MAPTYPE_NORMAL ||
2304                             entry->maptype == VM_MAPTYPE_VPAGETABLE) {
2305                                 int copyflag = entry->eflags &
2306                                                MAP_ENTRY_NEEDS_COPY;
2307                                 if (copyflag && ((entry->protection &
2308                                                   VM_PROT_WRITE) != 0)) {
2309                                         vm_map_entry_shadow(entry, 0);
2310                                 } else if (entry->object.vm_object == NULL &&
2311                                            !map->system_map) {
2312                                         vm_map_entry_allocate_object(entry);
2313                                 }
2314                         }
2315                         entry->wired_count++;
2316                         entry->eflags |= MAP_ENTRY_USER_WIRED;
2317
2318                         /*
2319                          * Now fault in the area.  Note that vm_fault_wire()
2320                          * may release the map lock temporarily, it will be
2321                          * relocked on return.  The in-transition
2322                          * flag protects the entries. 
2323                          */
2324                         save_start = entry->start;
2325                         save_end = entry->end;
2326                         rv = vm_fault_wire(map, entry, TRUE, 0);
2327                         if (rv) {
2328                                 CLIP_CHECK_BACK(entry, save_start);
2329                                 for (;;) {
2330                                         KASSERT(entry->wired_count == 1, ("bad wired_count on entry"));
2331                                         entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2332                                         entry->wired_count = 0;
2333                                         if (entry->end == save_end)
2334                                                 break;
2335                                         entry = entry->next;
2336                                         KASSERT(entry != &map->header, ("bad entry clip during backout"));
2337                                 }
2338                                 end = save_start;       /* unwire the rest */
2339                                 break;
2340                         }
2341                         /*
2342                          * note that even though the entry might have been
2343                          * clipped, the USER_WIRED flag we set prevents
2344                          * duplication so we do not have to do a 
2345                          * clip check.
2346                          */
2347                         entry = entry->next;
2348                 }
2349
2350                 /*
2351                  * If we failed, fall through to the unwiring section to
2352                  * unwire what we had wired so far.  'end' has already
2353                  * been adjusted.
2354                  */
2355                 if (rv)
2356                         new_pageable = 1;
2357
2358                 /*
2359                  * start_entry might have been clipped if we unlocked the
2360                  * map and blocked.  No matter how clipped it has gotten
2361                  * there should be a fragment that is on our start boundary.
2362                  */
2363                 CLIP_CHECK_BACK(start_entry, start);
2364         }
2365
2366         /*
2367          * Deal with the unwiring case.
2368          */
2369         if (new_pageable) {
2370                 /*
2371                  * This is the unwiring case.  We must first ensure that the
2372                  * range to be unwired is really wired down.  We know there
2373                  * are no holes.
2374                  */
2375                 entry = start_entry;
2376                 while ((entry != &map->header) && (entry->start < end)) {
2377                         if ((entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
2378                                 rv = KERN_INVALID_ARGUMENT;
2379                                 goto done;
2380                         }
2381                         KASSERT(entry->wired_count != 0, ("wired count was 0 with USER_WIRED set! %p", entry));
2382                         entry = entry->next;
2383                 }
2384
2385                 /*
2386                  * Now decrement the wiring count for each region. If a region
2387                  * becomes completely unwired, unwire its physical pages and
2388                  * mappings.
2389                  */
2390                 /*
2391                  * The entries were checked above to ensure they are wired
2392                  * with a valid wired count; this pass clears USER_WIRED and
2393                  * decrements the count, unwiring the pages at zero.
2394                  *
2395                  * The loop variable must be reset to start_entry first.  If
2396                  * it were reused from the loop above, this second loop would
2397                  * never be entered and the pages backing the entries would
2398                  * never be unwired, leaking wired pages.
2399                  */
2400                 entry = start_entry;
2401                 while ((entry != &map->header) && (entry->start < end)) {
2402                         KASSERT(entry->eflags & MAP_ENTRY_USER_WIRED,
2403                                 ("expected USER_WIRED on entry %p", entry));
2404                         entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2405                         entry->wired_count--;
2406                         if (entry->wired_count == 0)
2407                                 vm_fault_unwire(map, entry);
2408                         entry = entry->next;
2409                 }
2410         }
2411 done:
2412         vm_map_unclip_range(map, start_entry, start, real_end, &count,
2413                 MAP_CLIP_NO_HOLES);
2414         map->timestamp++;
2415         vm_map_unlock(map);
2416         vm_map_entry_release(count);
2417         return (rv);
2418 }
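
/*
 * Illustrative sketch (hypothetical helper, not part of the VM API): the
 * mlock()/munlock() style use of vm_map_unwire().  Passing FALSE for
 * new_pageable wires and faults in the range; passing TRUE drops the user
 * wiring again.
 */
static int
example_mlock_munlock(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	int rv;

	/* mlock(): wire and fault in the range */
	rv = vm_map_unwire(map, start, end, FALSE);
	if (rv != KERN_SUCCESS)
		return (rv);

	/* munlock(): release the user wiring */
	return (vm_map_unwire(map, start, end, TRUE));
}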
2419
2420 /*
2421  * Sets the pageability of the specified address range in the target map.
2422  * Regions specified as not pageable require locked-down physical
2423  * memory and physical page maps.
2424  *
2425  * The map must not be locked, but a reference must remain to the map
2426  * throughout the call.
2427  *
2428  * This function may be called via the zalloc path and must properly
2429  * reserve map entries for kernel_map.
2430  *
2431  * No requirements.
2432  */
2433 int
2434 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t real_end, int kmflags)
2435 {
2436         vm_map_entry_t entry;
2437         vm_map_entry_t start_entry;
2438         vm_offset_t end;
2439         int rv = KERN_SUCCESS;
2440         int count;
2441
2442         if (kmflags & KM_KRESERVE)
2443                 count = vm_map_entry_kreserve(MAP_RESERVE_COUNT);
2444         else
2445                 count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
2446         vm_map_lock(map);
2447         VM_MAP_RANGE_CHECK(map, start, real_end);
2448         end = real_end;
2449
2450         start_entry = vm_map_clip_range(map, start, end, &count,
2451                                         MAP_CLIP_NO_HOLES);
2452         if (start_entry == NULL) {
2453                 vm_map_unlock(map);
2454                 rv = KERN_INVALID_ADDRESS;
2455                 goto failure;
2456         }
2457         if ((kmflags & KM_PAGEABLE) == 0) {
2458                 /*
2459                  * Wiring.  
2460                  *
2461                  * 1.  Holding the write lock, we create any shadow or zero-fill
2462                  * objects that need to be created. Then we clip each map
2463                  * entry to the region to be wired and increment its wiring
2464                  * count.  We create objects before clipping the map entries
2465                  * to avoid object proliferation.
2466                  *
2467                  * 2.  We downgrade to a read lock, and call vm_fault_wire to
2468                  * fault in the pages for any newly wired area (wired_count is
2469                  * 1).
2470                  *
2471                  * Downgrading to a read lock for vm_fault_wire avoids a 
2472                  * possible deadlock with another process that may have faulted
2473                  * on one of the pages to be wired (it would mark the page busy,
2474                  * blocking us, then in turn block on the map lock that we
2475                  * hold).  Because of problems in the recursive lock package,
2476                  * we cannot upgrade to a write lock in vm_map_lookup.  Thus,
2477                  * any actions that require the write lock must be done
2478                  * beforehand.  Because we keep the read lock on the map, the
2479                  * copy-on-write status of the entries we modify here cannot
2480                  * change.
2481                  */
2482                 entry = start_entry;
2483                 while ((entry != &map->header) && (entry->start < end)) {
2484                         /*
2485                          * Trivial case if the entry is already wired
2486                          */
2487                         if (entry->wired_count) {
2488                                 entry->wired_count++;
2489                                 entry = entry->next;
2490                                 continue;
2491                         }
2492
2493                         /*
2494                          * The entry is being newly wired, we have to setup
2495                          * appropriate management structures.  A shadow 
2496                          * object is required for a copy-on-write region,
2497                          * or a normal object for a zero-fill region.  We
2498                          * do not have to do this for entries that point to sub
2499                          * maps because we won't hold the lock on the sub map.
2500                          */
2501                         if (entry->maptype == VM_MAPTYPE_NORMAL ||
2502                             entry->maptype == VM_MAPTYPE_VPAGETABLE) {
2503                                 int copyflag = entry->eflags &
2504                                                MAP_ENTRY_NEEDS_COPY;
2505                                 if (copyflag && ((entry->protection &
2506                                                   VM_PROT_WRITE) != 0)) {
2507                                         vm_map_entry_shadow(entry, 0);
2508                                 } else if (entry->object.vm_object == NULL &&
2509                                            !map->system_map) {
2510                                         vm_map_entry_allocate_object(entry);
2511                                 }
2512                         }
2513
2514                         entry->wired_count++;
2515                         entry = entry->next;
2516                 }
2517
2518                 /*
2519                  * Pass 2.
2520                  */
2521
2522                 /*
2523                  * HACK HACK HACK HACK
2524                  *
2525                  * vm_fault_wire() temporarily unlocks the map to avoid
2526                  * deadlocks.  The in-transition flag from vm_map_clip_range
2527                  * call should protect us from changes while the map is
2528                  * unlocked.
2529                  *
2530                  * NOTE: Previously this comment stated that clipping might
2531                  *       still occur while the map is unlocked, but from
2532                  *       what I can tell it actually cannot.
2533                  *
2534                  *       It is unclear whether the CLIP_CHECK_*() calls
2535                  *       are still needed but we keep them in anyway.
2536                  *
2537                  * HACK HACK HACK HACK
2538                  */
2539
2540                 entry = start_entry;
2541                 while (entry != &map->header && entry->start < end) {
2542                         /*
2543                          * If vm_fault_wire fails for any page we need to undo
2544                          * what has been done.  We decrement the wiring count
2545                          * for those pages which have not yet been wired (now)
2546                          * and unwire those that have (later).
2547                          */
2548                         vm_offset_t save_start = entry->start;
2549                         vm_offset_t save_end = entry->end;
2550
2551                         if (entry->wired_count == 1)
2552                                 rv = vm_fault_wire(map, entry, FALSE, kmflags);
2553                         if (rv) {
2554                                 CLIP_CHECK_BACK(entry, save_start);
2555                                 for (;;) {
2556                                         KASSERT(entry->wired_count == 1, ("wired_count changed unexpectedly"));
2557                                         entry->wired_count = 0;
2558                                         if (entry->end == save_end)
2559                                                 break;
2560                                         entry = entry->next;
2561                                         KASSERT(entry != &map->header, ("bad entry clip during backout"));
2562                                 }
2563                                 end = save_start;
2564                                 break;
2565                         }
2566                         CLIP_CHECK_FWD(entry, save_end);
2567                         entry = entry->next;
2568                 }
2569
2570                 /*
2571                  * If a failure occurred, undo everything by falling through
2572                  * to the unwiring code.  'end' has already been adjusted
2573                  * appropriately.
2574                  */
2575                 if (rv)
2576                         kmflags |= KM_PAGEABLE;
2577
2578                 /*
2579                  * start_entry is still IN_TRANSITION but may have been 
2580                  * clipped since vm_fault_wire() unlocks and relocks the
2581                  * map.  No matter how clipped it has gotten there should
2582                  * be a fragment that is on our start boundary.
2583                  */
2584                 CLIP_CHECK_BACK(start_entry, start);
2585         }
2586
2587         if (kmflags & KM_PAGEABLE) {
2588                 /*
2589                  * This is the unwiring case.  We must first ensure that the
2590                  * range to be unwired is really wired down.  We know there
2591                  * are no holes.
2592                  */
2593                 entry = start_entry;
2594                 while ((entry != &map->header) && (entry->start < end)) {
2595                         if (entry->wired_count == 0) {
2596                                 rv = KERN_INVALID_ARGUMENT;
2597                                 goto done;
2598                         }
2599                         entry = entry->next;
2600                 }
2601
2602                 /*
2603                  * Now decrement the wiring count for each region. If a region
2604                  * becomes completely unwired, unwire its physical pages and
2605                  * mappings.
2606                  */
2607                 entry = start_entry;
2608                 while ((entry != &map->header) && (entry->start < end)) {
2609                         entry->wired_count--;
2610                         if (entry->wired_count == 0)
2611                                 vm_fault_unwire(map, entry);
2612                         entry = entry->next;
2613                 }
2614         }
2615 done:
2616         vm_map_unclip_range(map, start_entry, start, real_end,
2617                             &count, MAP_CLIP_NO_HOLES);
2618         map->timestamp++;
2619         vm_map_unlock(map);
2620 failure:
2621         if (kmflags & KM_KRESERVE)
2622                 vm_map_entry_krelease(count);
2623         else
2624                 vm_map_entry_release(count);
2625         return (rv);
2626 }
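
/*
 * Illustrative sketch (hypothetical helper, not part of the VM API): kernel
 * wiring with vm_map_wire().  A zero kmflags wires and faults in the range;
 * calling again with KM_PAGEABLE set unwires it.  KM_KRESERVE would be added
 * when running from the reserved zalloc path.
 */
static int
example_kernel_wire(vm_map_t map, vm_offset_t addr, vm_size_t size)
{
	int rv;

	rv = vm_map_wire(map, addr, addr + size, 0);
	if (rv != KERN_SUCCESS)
		return (rv);

	/* later: release the wiring */
	return (vm_map_wire(map, addr, addr + size, KM_PAGEABLE));
}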
2627
2628 /*
2629  * Mark a newly allocated address range as wired but do not fault in
2630  * the pages.  The caller is expected to load the pages into the object.
2631  *
2632  * The map must be locked on entry and will remain locked on return.
2633  * No other requirements.
2634  */
2635 void
2636 vm_map_set_wired_quick(vm_map_t map, vm_offset_t addr, vm_size_t size,
2637                        int *countp)
2638 {
2639         vm_map_entry_t scan;
2640         vm_map_entry_t entry;
2641
2642         entry = vm_map_clip_range(map, addr, addr + size,
2643                                   countp, MAP_CLIP_NO_HOLES);
2644         for (scan = entry;
2645              scan != &map->header && scan->start < addr + size;
2646              scan = scan->next) {
2647             KKASSERT(scan->wired_count == 0);
2648             scan->wired_count = 1;
2649         }
2650         vm_map_unclip_range(map, entry, addr, addr + size,
2651                             countp, MAP_CLIP_NO_HOLES);
2652 }
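
/*
 * Illustrative sketch (hypothetical helper, not part of the VM API): the
 * calling convention for vm_map_set_wired_quick().  The map must be locked
 * around the call and an entry reservation supplied; the pages themselves
 * are loaded into the object by the caller afterwards.
 */
static void
example_mark_wired(vm_map_t map, vm_offset_t addr, vm_size_t size)
{
	int count;

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	vm_map_lock(map);
	/* the range must already exist and currently be unwired */
	vm_map_set_wired_quick(map, addr, size, &count);
	vm_map_unlock(map);
	vm_map_entry_release(count);
}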
2653
2654 /*
2655  * Push any dirty cached pages in the address range to their pager.
2656  * If syncio is TRUE, dirty pages are written synchronously.
2657  * If invalidate is TRUE, any cached pages are freed as well.
2658  *
2659  * This routine is called by sys_msync()
2660  *
2661  * Returns an error if any part of the specified range is not mapped.
2662  *
2663  * No requirements.
2664  */
2665 int
2666 vm_map_clean(vm_map_t map, vm_offset_t start, vm_offset_t end,
2667              boolean_t syncio, boolean_t invalidate)
2668 {
2669         vm_map_entry_t current;
2670         vm_map_entry_t entry;
2671         vm_size_t size;
2672         vm_object_t object;
2673         vm_object_t tobj;
2674         vm_ooffset_t offset;
2675
2676         vm_map_lock_read(map);
2677         VM_MAP_RANGE_CHECK(map, start, end);
2678         if (!vm_map_lookup_entry(map, start, &entry)) {
2679                 vm_map_unlock_read(map);
2680                 return (KERN_INVALID_ADDRESS);
2681         }
2682         lwkt_gettoken(&map->token);
2683
2684         /*
2685          * Make a first pass to check for holes.
2686          */
2687         for (current = entry; current->start < end; current = current->next) {
2688                 if (current->maptype == VM_MAPTYPE_SUBMAP) {
2689                         lwkt_reltoken(&map->token);
2690                         vm_map_unlock_read(map);
2691                         return (KERN_INVALID_ARGUMENT);
2692                 }
2693                 if (end > current->end &&
2694                     (current->next == &map->header ||
2695                         current->end != current->next->start)) {
2696                         lwkt_reltoken(&map->token);
2697                         vm_map_unlock_read(map);
2698                         return (KERN_INVALID_ADDRESS);
2699                 }
2700         }
2701
2702         if (invalidate)
2703                 pmap_remove(vm_map_pmap(map), start, end);
2704
2705         /*
2706          * Make a second pass, cleaning/uncaching pages from the indicated
2707          * objects as we go.
2708          */
2709         for (current = entry; current->start < end; current = current->next) {
2710                 offset = current->offset + (start - current->start);
2711                 size = (end <= current->end ? end : current->end) - start;
2712
2713                 switch(current->maptype) {
2714                 case VM_MAPTYPE_SUBMAP:
2715                 {
2716                         vm_map_t smap;
2717                         vm_map_entry_t tentry;
2718                         vm_size_t tsize;
2719
2720                         smap = current->object.sub_map;
2721                         vm_map_lock_read(smap);
2722                         vm_map_lookup_entry(smap, offset, &tentry);
2723                         tsize = tentry->end - offset;
2724                         if (tsize < size)
2725                                 size = tsize;
2726                         object = tentry->object.vm_object;
2727                         offset = tentry->offset + (offset - tentry->start);
2728                         vm_map_unlock_read(smap);
2729                         break;
2730                 }
2731                 case VM_MAPTYPE_NORMAL:
2732                 case VM_MAPTYPE_VPAGETABLE:
2733                         object = current->object.vm_object;
2734                         break;
2735                 default:
2736                         object = NULL;
2737                         break;
2738                 }
2739
2740                 if (object)
2741                         vm_object_hold(object);
2742
2743                 /*
2744                  * Note that there is absolutely no sense in writing out
2745                  * anonymous objects, so we track down the vnode object
2746                  * to write out.
2747                  * We invalidate (remove) all pages from the address space
2748                  * anyway, for semantic correctness.
2749                  *
2750                  * note: certain anonymous maps, such as MAP_NOSYNC maps,
2751                  * may start out with a NULL object.
2752                  */
2753                 while (object && (tobj = object->backing_object) != NULL) {
2754                         vm_object_hold(tobj);
2755                         if (tobj == object->backing_object) {
2756                                 vm_object_lock_swap();
2757                                 offset += object->backing_object_offset;
2758                                 vm_object_drop(object);
2759                                 object = tobj;
2760                                 if (object->size < OFF_TO_IDX(offset + size))
2761                                         size = IDX_TO_OFF(object->size) -
2762                                                offset;
2763                                 break;
2764                         }
2765                         vm_object_drop(tobj);
2766                 }
2767                 if (object && (object->type == OBJT_VNODE) && 
2768                     (current->protection & VM_PROT_WRITE) &&
2769                     (object->flags & OBJ_NOMSYNC) == 0) {
2770                         /*
2771                          * Flush pages if writing is allowed, invalidate them
2772                          * if invalidation requested.  Pages undergoing I/O
2773                          * will be ignored by vm_object_page_remove().
2774                          *
2775                          * We cannot lock the vnode and then wait for paging
2776                          * to complete without deadlocking against vm_fault.
2777                          * Instead we simply call vm_object_page_remove() and
2778                          * allow it to block internally on a page-by-page 
2779                          * basis when it encounters pages undergoing async 
2780                          * I/O.
2781                          */
2782                         int flags;
2783
2784                         /* no chain wait needed for vnode objects */
2785                         vm_object_reference_locked(object);
2786                         vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY);
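			/*
			 * Derive the pager flags: any sync or invalidate
			 * request forces a synchronous clean (OBJPC_SYNC),
			 * and an invalidation request additionally tags the
			 * operation with OBJPC_INVAL.
			 */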
2787                         flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
2788                         flags |= invalidate ? OBJPC_INVAL : 0;
2789
2790                         /*
2791                          * When operating on a virtual page table just
2792                          * When operating on a virtual page table just
2793                          * flush the whole object.  XXX we probably ought
2794                          * to narrow this to the affected range instead.
2795                         switch(current->maptype) {
2796                         case VM_MAPTYPE_NORMAL:
2797                                 vm_object_page_clean(object,
2798                                     OFF_TO_IDX(offset),
2799                                     OFF_TO_IDX(offset + size + PAGE_MASK),
2800                                     flags);
2801                                 break;
2802                         case VM_MAPTYPE_VPAGETABLE:
2803                                 vm_object_page_clean(object, 0, 0, flags);
2804                                 break;
2805                         }
2806                         vn_unlock(((struct vnode *)object->handle));
2807                         vm_object_deallocate_locked(object);
2808                 }
2809                 if (object && invalidate &&
2810                    ((object->type == OBJT_VNODE) ||
2811                     (object->type == OBJT_DEVICE) ||
2812                     (object->type == OBJT_MGTDEVICE))) {
2813                         int clean_only = 
2814                                 ((object->type == OBJT_DEVICE) ||
2815                                 (object->type == OBJT_MGTDEVICE)) ? FALSE : TRUE;
2816                         /* no chain wait needed for vnode/device objects */
2817                         vm_object_reference_locked(object);
2818                         switch(current->maptype) {
2819                         case VM_MAPTYPE_NORMAL:
2820                                 vm_object_page_remove(object,
2821                                     OFF_TO_IDX(offset),
2822                                     OFF_TO_IDX(offset + size + PAGE_MASK),
2823                                     clean_only);
2824                                 break;
2825                         case VM_MAPTYPE_VPAGETABLE:
2826                                 vm_object_page_remove(object, 0, 0, clean_only);
2827                                 break;
2828                         }
2829                         vm_object_deallocate_locked(object);
2830                 }
2831                 start += size;
2832                 if (object)
2833                         vm_object_drop(object);
2834         }
2835
2836         lwkt_reltoken(&map->token);
2837         vm_map_unlock_read(map);
2838
2839         return (KERN_SUCCESS);
2840 }
2841
2842 /*
2843  * Make the region specified by this entry pageable.
2844  *
2845  * The vm_map must be exclusively locked.
2846  */
2847 static void 
2848 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
2849 {
2850         entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2851         entry->wired_count = 0;
2852         vm_fault_unwire(map, entry);
2853 }
2854
2855 /*
2856  * Deallocate the given entry from the target map.
2857  *
2858  * The vm_map must be exclusively locked.
2859  */
2860 static void
2861 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry, int *countp)
2862 {
2863         vm_map_entry_unlink(map, entry);
2864         map->size -= entry->end - entry->start;
2865
2866         switch(entry->maptype) {
2867         case VM_MAPTYPE_NORMAL:
2868         case VM_MAPTYPE_VPAGETABLE:
2869         case VM_MAPTYPE_SUBMAP:
2870                 vm_object_deallocate(entry->object.vm_object);
2871                 break;
2872         case VM_MAPTYPE_UKSMAP:
2873                 /* XXX TODO */
2874                 break;
2875         default:
2876                 break;
2877         }
2878
2879         vm_map_entry_dispose(map, entry, countp);
2880 }
2881
2882 /*
2883  * Deallocates the given address range from the target map.
2884  *
2885  * The vm_map must be exclusively locked.
2886  */
2887 int
2888 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end, int *countp)
2889 {
2890         vm_object_t object;
2891         vm_map_entry_t entry;
2892         vm_map_entry_t first_entry;
2893         vm_offset_t hole_start;
2894
2895         ASSERT_VM_MAP_LOCKED(map);
2896         lwkt_gettoken(&map->token);
2897 again:
2898         /*
2899          * Find the start of the region, and clip it.  Set entry to point
2900          * at the first record containing the requested address or, if no
2901          * such record exists, the next record with a greater address.  The
2902          * loop will run from this point until a record beyond the termination
2903          * address is encountered.
2904          *
2905          * Adjust freehint[] for either the clip case or the extension case.
2906          *
2907          * GGG see other GGG comment.
2908          */
2909         if (vm_map_lookup_entry(map, start, &first_entry)) {
2910                 entry = first_entry;
2911                 vm_map_clip_start(map, entry, start, countp);
2912                 hole_start = start;
2913         } else {
2914                 entry = first_entry->next;
2915                 if (entry == &map->header)
2916                         hole_start = first_entry->start;
2917                 else
2918                         hole_start = first_entry->end;
2919         }
2920
2921         /*
2922          * Step through all entries in this region
2923          */
2924         while ((entry != &map->header) && (entry->start < end)) {
2925                 vm_map_entry_t next;
2926                 vm_offset_t s, e;
2927                 vm_pindex_t offidxstart, offidxend, count;
2928
2929                 /*
2930                  * If we hit an in-transition entry we have to sleep and
2931                  * retry.  It's easier (and not really slower) to just retry
2932                  * since this case occurs so rarely and the hint is already
2933                  * pointing at the right place.  We have to reset the
2934                  * start offset so as not to accidentally delete an entry
2935                  * another process just created in vacated space.
2936                  */
2937                 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
2938                         entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2939                         start = entry->start;
2940                         ++mycpu->gd_cnt.v_intrans_coll;
2941                         ++mycpu->gd_cnt.v_intrans_wait;
2942                         vm_map_transition_wait(map);
2943                         goto again;
2944                 }
2945                 vm_map_clip_end(map, entry, end, countp);
2946
2947                 s = entry->start;
2948                 e = entry->end;
2949                 next = entry->next;
2950
2951                 offidxstart = OFF_TO_IDX(entry->offset);
2952                 count = OFF_TO_IDX(e - s);
2953
2954                 switch(entry->maptype) {
2955                 case VM_MAPTYPE_NORMAL:
2956                 case VM_MAPTYPE_VPAGETABLE:
2957                 case VM_MAPTYPE_SUBMAP:
2958                         object = entry->object.vm_object;
2959                         break;
2960                 default:
2961                         object = NULL;
2962                         break;
2963                 }
2964
2965                 /*
2966                  * Unwire before removing addresses from the pmap; otherwise,
2967                  * unwiring will put the entries back in the pmap.
2968                  */
2969                 if (entry->wired_count != 0)
2970                         vm_map_entry_unwire(map, entry);
2971
2972                 offidxend = offidxstart + count;
2973
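		/*
		 * Tear down the backing store for this entry.  Pages
		 * belonging to the kernel object are removed directly.
		 * For vnode and device-backed objects only the pmap
		 * entries are removed, under a shared hold.  Anonymous
		 * default/swap objects also have their pmap entries
		 * removed and, when this was the object's only mapping,
		 * the object is collapsed and its pages and swap space
		 * freed below.
		 */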
2974                 if (object == &kernel_object) {
2975                         vm_object_hold(object);
2976                         vm_object_page_remove(object, offidxstart,
2977                                               offidxend, FALSE);
2978                         vm_object_drop(object);
2979                 } else if (object && object->type != OBJT_DEFAULT &&
2980                            object->type != OBJT_SWAP) {
2981                         /*
2982                          * vnode object routines cannot be chain-locked,
2983                          * but since we aren't removing pages from the
2984                          * object here we can use a shared hold.
2985                          */
2986                         vm_object_hold_shared(object);
2987                         pmap_remove(map->pmap, s, e);
2988                         vm_object_drop(object);
2989                 } else if (object) {
2990                         vm_object_hold(object);
2991                         vm_object_chain_acquire(object, 0);
2992                         pmap_remove(map->pmap, s, e);
2993
2994                         if (object != NULL &&
2995                             object->ref_count != 1 &&
2996                             (object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) ==
2997                              OBJ_ONEMAPPING &&
2998                             (object->type == OBJT_DEFAULT ||
2999                              object->type == OBJT_SWAP)) {
3000                                 vm_object_collapse(object, NULL);
3001                                 vm_object_page_remove(object, offidxstart,
3002                                                       offidxend, FALSE);
3003                                 if (object->type == OBJT_SWAP) {
3004                                         swap_pager_freespace(object,
3005                                                              offidxstart,
3006                                                              count);
3007                                 }
3008                                 if (offidxend >= object->size &&
3009                                     offidxstart < object->size) {
3010                                         object->size = offidxstart;
3011                                 }
3012                         }
3013                         vm_object_chain_release(object);
3014                         vm_object_drop(object);
3015                 } else if (entry->maptype == VM_MAPTYPE_UKSMAP) {
3016                         pmap_remove(map->pmap, s, e);
3017                 }
3018
3019                 /*
3020                  * Delete the entry (which may delete the object) only after
3021                  * removing all pmap entries pointing to its pages.
3022                  * (Otherwise, its page frames may be reallocated, and any
3023                  * modify bits will be set in the wrong object!)
3024                  */
3025                 vm_map_entry_delete(map, entry, countp);
3026                 entry = next;
3027         }
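	/*
	 * Record the vacated range as a hole in the free-space hints so
	 * that later vm_map_findspace() calls can reuse it quickly.
	 */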
3028         if (entry == &map->header)
3029                 vm_map_freehint_hole(map, hole_start, entry->end - hole_start);
3030         else
3031                 vm_map_freehint_hole(map, hole_start,
3032                                      entry->start - hole_start);
3033
3034         lwkt_reltoken(&map->token);
3035
3036         return (KERN_SUCCESS);
3037 }
3038
3039 /*
3040  * Remove the given address range from the target map.
3041  * This is the exported form of vm_map_delete.
3042  *
3043  * No requirements.
3044  */
3045 int
3046 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
3047 {
3048         int result;
3049         int count;
3050
3051         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
3052         vm_map_lock(map);
3053         VM_MAP_RANGE_CHECK(map, start, end);
3054         result = vm_map_delete(map, start, end, &count);
3055         vm_map_unlock(map);
3056         vm_map_entry_release(count);
3057
3058         return (result);
3059 }
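
/*
 * Illustrative caller (a sketch only; 'addr' and 'size' are hypothetical):
 * a subsystem releasing a previously established kernel mapping normally
 * goes through this wrapper, which reserves map entries and takes the map
 * lock itself:
 *
 *	vm_map_remove(&kernel_map, addr, addr + size);
 *
 * Code that batches several operations under one exclusive lock calls
 * vm_map_delete() directly, bracketed by vm_map_entry_reserve() /
 * vm_map_entry_release() and vm_map_lock() / vm_map_unlock() as above.
 */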
3060
3061 /*
3062  * Assert that the target map allows the specified privilege on the
3063  * entire address region given.  The entire region must be allocated.
3064  *
3065  * The caller must specify whether the vm_map is already locked or not.
3066  */
3067 boolean_t
3068 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
3069                         vm_prot_t protection, boolean_t have_lock)
3070 {
3071         vm_map_entry_t entry;
3072         vm_map_entry_t tmp_entry;
3073         boolean_t result;
3074
3075         if (have_lock == FALSE)
3076                 vm_map_lock_read(map);
3077
3078         if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
3079                 if (have_lock == FALSE)
3080                         vm_map_unlock_read(map);
3081                 return (FALSE);
3082         }
3083         entry = tmp_entry;
3084
3085         result = TRUE;
3086         while (start < end) {
3087                 if (entry == &map->header) {
3088                         result = FALSE;
3089                         break;
3090                 }
3091                 /*
3092                  * No holes allowed!
3093                  */
3094
3095                 if (start < entry->start) {
3096                         result = FALSE;
3097                         break;
3098                 }
3099                 /*
3100                  * Check protection associated with entry.
3101                  */
3102
3103                 if ((entry->protection & protection) != protection) {
3104                         result = FALSE;
3105                         break;
3106                 }
3107                 /* go to next entry */
3108
3109                 start = entry->end;
3110                 entry = entry->next;
3111         }
3112         if (have_lock == FALSE)
3113                 vm_map_unlock_read(map);
3114         return (result);
3115 }
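
/*
 * Illustrative use (sketch; the map and range names are the caller's):
 * verify that a user range is entirely mapped and readable before
 * operating on it, letting this routine take the read lock because the
 * caller holds none:
 *
 *	if (!vm_map_check_protection(map, start, end, VM_PROT_READ, FALSE))
 *		return (EFAULT);
 */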
3116
3117 /*
3118  * If appropriate this function shadows the original object with a new object
3119  * and moves the VM pages from the original object to the new object.
3120  * The original object will also be collapsed, if possible.
3121  *
3122  * We can only do this for normal memory objects with a single mapping, and
3123  * it only makes sense to do it if there are 2 or more refs on the original
3124  * object.  i.e. typically a memory object that has been extended into
3125  * multiple vm_map_entry's with non-overlapping ranges.
3126  *
3127  * This makes it easier to remove unused pages and keeps object inheritance
3128  * from being a negative impact on memory usage.
3129  *
3130  * On return the (possibly new) entry->object.vm_object will have an
3131  * additional ref on it for the caller to dispose of (usually by cloning
3132  * the vm_map_entry).  The additional ref had to be done in this routine
3133  * to avoid racing a collapse.  The object's ONEMAPPING flag will also be
3134  * cleared.
3135  *
3136  * The vm_map must be locked and its token held.
3137  */
3138 static void
3139 vm_map_split(vm_map_entry_t entry)
3140 {
3141         /* OPTIMIZED */
3142         vm_object_t oobject, nobject, bobject;
3143         vm_offset_t s, e;
3144         vm_page_t m;
3145         vm_pindex_t offidxstart, offidxend, idx;
3146         vm_size_t size;
3147         vm_ooffset_t offset;
3148         int useshadowlist;
3149
3150         /*
3151          * Optimize away object locks for vnode objects.  Important exit/exec
3152          * critical path.
3153          *
3154          * OBJ_ONEMAPPING doesn't apply to vnode objects but clear the flag
3155          * anyway.
3156          */
3157         oobject = entry->object.vm_object;
3158         if (oobject->type != OBJT_DEFAULT && oobject->type != OBJT_SWAP) {
3159                 vm_object_reference_quick(oobject);
3160                 vm_object_clear_flag(oobject, OBJ_ONEMAPPING);
3161                 return;
3162         }
3163
3164         /*
3165          * Setup.  Chain lock the original object throughout the entire
3166          * routine to prevent new page faults from occurring.
3167          *
3168          * XXX can madvise WILLNEED interfere with us too?
3169          */
3170         vm_object_hold(oobject);
3171         vm_object_chain_acquire(oobject, 0);
3172
3173         /*
3174          * Original object cannot be split?  Might have also changed state.
3175          */
3176         if (oobject->handle == NULL || (oobject->type != OBJT_DEFAULT &&
3177                                         oobject->type != OBJT_SWAP)) {
3178                 vm_object_chain_release(oobject);
3179                 vm_object_reference_locked(oobject);
3180                 vm_object_clear_flag(oobject, OBJ_ONEMAPPING);
3181                 vm_object_drop(oobject);
3182                 return;
3183         }
3184
3185         /*
3186          * Collapse original object with its backing store as an
3187          * optimization to reduce chain lengths when possible.
3188          *
3189          * If ref_count <= 1 there aren't other non-overlapping vm_map_entry's
3190          * for oobject, so there's no point collapsing it.
3191          *
3192          * Then re-check whether the object can be split.
3193          */
3194         vm_object_collapse(oobject, NULL);
3195
3196         if (oobject->ref_count <= 1 ||
3197             (oobject->type != OBJT_DEFAULT && oobject->type != OBJT_SWAP) ||
3198             (oobject->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) != OBJ_ONEMAPPING) {
3199                 vm_object_chain_release(oobject);
3200                 vm_object_reference_locked(oobject);
3201                 vm_object_clear_flag(oobject, OBJ_ONEMAPPING);
3202                 vm_object_drop(oobject);
3203                 return;
3204         }
3205
3206         /*
3207          * Acquire the chain lock on the backing object.
3208          *
3209          * Give bobject an additional ref count for when it will be shadowed
3210          * by nobject.
3211          */
3212         useshadowlist = 0;
3213         if ((bobject = oobject->backing_object) != NULL) {
3214                 if (bobject->type != OBJT_VNODE) {
3215                         useshadowlist = 1;
3216                         vm_object_hold(bobject);
3217                         vm_object_chain_wait(bobject, 0);
3218                         /* ref for shadowing below */
3219                         vm_object_reference_locked(bobject);
3220                         vm_object_chain_acquire(bobject, 0);
3221                         KKASSERT(oobject->backing_object == bobject);
3222                         KKASSERT((bobject->flags & OBJ_DEAD) == 0);
3223                 } else {
3224                         /*
3225                          * vnodes are not placed on the shadow list but
3226                          * they still get another ref for the backing_object
3227                          * reference.
3228                          */
3229                         vm_object_reference_quick(bobject);
3230                 }
3231         }
3232
3233         /*
3234          * Calculate the object page range and allocate the new object.
3235          */
3236         offset = entry->offset;
3237         s = entry->start;
3238         e = entry->end;
3239
3240         offidxstart = OFF_TO_IDX(offset);
3241         offidxend = offidxstart + OFF_TO_IDX(e - s);
3242         size = offidxend - offidxstart;
3243
3244         switch(oobject->type) {
3245         case OBJT_DEFAULT:
3246                 nobject = default_pager_alloc(NULL, IDX_TO_OFF(size),
3247                                               VM_PROT_ALL, 0);
3248                 break;
3249         case OBJT_SWAP:
3250                 nobject = swap_pager_alloc(NULL, IDX_TO_OFF(size),
3251                                            VM_PROT_ALL, 0);
3252                 break;
3253         default:
3254                 /* not reached */
3255                 nobject = NULL;
3256                 KKASSERT(0);
3257         }
3258
3259         if (nobject == NULL) {
3260                 if (bobject) {
3261                         if (useshadowlist) {
3262                                 vm_object_chain_release(bobject);
3263                                 vm_object_deallocate(bobject);
3264                                 vm_object_drop(bobject);
3265                         } else {
3266                                 vm_object_deallocate(bobject);
3267                         }
3268                 }
3269                 vm_object_chain_release(oobject);
3270                 vm_object_reference_locked(oobject);
3271                 vm_object_clear_flag(oobject, OBJ_ONEMAPPING);
3272                 vm_object_drop(oobject);
3273                 return;
3274         }
3275
3276         /*
3277          * The new object will replace entry->object.vm_object so it needs
3278          * a second reference (the caller expects an additional ref).
3279          */
3280         vm_object_hold(nobject);
3281         vm_object_reference_locked(nobject);
3282         vm_object_chain_acquire(nobject, 0);
3283
3284         /*
3285          * nobject shadows bobject (oobject already shadows bobject).
3286          *
3287          * Adding an object to bobject's shadow list requires refing bobject
3288          * which we did above in the useshadowlist case.
3289          */
3290         if (bobject) {
3291                 nobject->backing_object_offset =
3292                     oobject->backing_object_offset + IDX_TO_OFF(offidxstart);
3293                 nobject->backing_object = bobject;
3294                 if (useshadowlist) {
3295                         bobject->shadow_count++;
3296                         atomic_add_int(&bobject->generation, 1);
3297                         LIST_INSERT_HEAD(&bobject->shadow_head,
3298                                          nobject, shadow_list);
3299                         vm_object_clear_flag(bobject, OBJ_ONEMAPPING); /*XXX*/
3300                         vm_object_chain_release(bobject);
3301                         vm_object_drop(bobject);
3302                         vm_object_set_flag(nobject, OBJ_ONSHADOW);
3303                 }
3304         }
3305
3306         /*
3307          * Move the VM pages from oobject to nobject
3308          */
3309         for (idx = 0; idx < size; idx++) {
3310                 vm_page_t m;
3311
3312                 m = vm_page_lookup_busy_wait(oobject, offidxstart + idx,
3313                                              TRUE, "vmpg");
3314                 if (m == NULL)
3315                         continue;
3316
3317                 /*
3318                  * We must wait for pending I/O to complete before we can
3319                  * rename the page.
3320                  *
3321                  * We do not have to VM_PROT_NONE the page as mappings should
3322                  * not be changed by this operation.
3323                  *
3324                  * NOTE: The act of renaming a page updates chaingen for both
3325                  *       objects.
3326                  */
3327                 vm_page_rename(m, nobject, idx);
3328                 /* page automatically made dirty by rename and cache handled */
3329                 /* page remains busy */
3330         }
3331
3332         if (oobject->type == OBJT_SWAP) {
3333                 vm_object_pip_add(oobject, 1);
3334                 /*
3335                  * copy oobject pages into nobject and destroy unneeded
3336                  * pages in shadow object.
3337                  */
3338                 swap_pager_copy(oobject, nobject, offidxstart, 0);
3339                 vm_object_pip_wakeup(oobject);
3340         }
3341
3342         /*
3343          * Wakeup the pages we played with.  No spl protection is needed
3344          * for a simple wakeup.
3345          */
3346         for (idx = 0; idx < size; idx++) {
3347                 m = vm_page_lookup(nobject, idx);
3348                 if (m) {
3349                         KKASSERT(m->flags & PG_BUSY);
3350                         vm_page_wakeup(m);
3351                 }
3352         }
3353         entry->object.vm_object = nobject;
3354         entry->offset = 0LL;
3355
3356         /*
3357          * Cleanup
3358          *
3359          * NOTE: There is no need to remove OBJ_ONEMAPPING from oobject, the
3360          *       related pages were moved and are no longer applicable to the
3361          *       original object.
3362          *
3363          * NOTE: Deallocate oobject (due to its entry->object.vm_object being
3364          *       replaced by nobject).
3365          */
3366         vm_object_chain_release(nobject);
3367         vm_object_drop(nobject);
3368         if (bobject && useshadowlist) {
3369                 vm_object_chain_release(bobject);
3370                 vm_object_drop(bobject);
3371         }
3372         vm_object_chain_release(oobject);
3373         /*vm_object_clear_flag(oobject, OBJ_ONEMAPPING);*/
3374         vm_object_deallocate_locked(oobject);
3375         vm_object_drop(oobject);
3376 }
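
/*
 * Resulting object topology for a successful split (sketch, shown for the
 * case where a backing object exists):
 *
 *	before:	entry -> oobject -> bobject
 *	after:	entry -> nobject -> bobject
 *
 * The pages backing [entry->start, entry->end) move from oobject to
 * nobject, entry->offset resets to 0, and oobject loses the reference
 * the entry used to hold on it.
 */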
3377
3378 /*
3379  * Copies the contents of the source entry to the destination
3380  * entry.  The entries *must* be aligned properly.
3381  *
3382  * The vm_maps must be exclusively locked.
3383  * The vm_map's token must be held.
3384  *
3385  * Because the maps are locked no faults can be in progress during the
3386  * operation.
3387  */
3388 static void
3389 vm_map_copy_entry(vm_map_t src_map, vm_map_t dst_map,
3390                   vm_map_entry_t src_entry, vm_map_entry_t dst_entry)
3391 {
3392         vm_object_t src_object;
3393
3394         if (dst_entry->maptype == VM_MAPTYPE_SUBMAP ||
3395             dst_entry->maptype == VM_MAPTYPE_UKSMAP)
3396                 return;
3397         if (src_entry->maptype == VM_MAPTYPE_SUBMAP ||
3398             src_entry->maptype == VM_MAPTYPE_UKSMAP)
3399                 return;
3400
3401         if (src_entry->wired_count == 0) {
3402                 /*
3403                  * If the source entry is marked needs_copy, it is already
3404                  * write-protected.
3405                  */
3406                 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
3407                         pmap_protect(src_map->pmap,
3408                             src_entry->start,
3409                             src_entry->end,
3410                             src_entry->protection & ~VM_PROT_WRITE);
3411                 }
3412
3413                 /*
3414                  * Make a copy of the object.
3415                  *
3416                  * The object must be locked prior to checking the object type
3417                  * and for the call to vm_object_collapse() and vm_map_split().
3418                  * We cannot use *_hold() here because the split code will
3419                  * probably try to destroy the object.  The lock is a pool
3420                  * token and doesn't care.
3421                  *
3422                  * We must bump src_map->timestamp when setting
3423                  * MAP_ENTRY_NEEDS_COPY to force any concurrent fault
3424                  * to retry, otherwise the concurrent fault might improperly
3425                  * install a RW pte when it's supposed to be a RO (COW) pte.
3426                  * This race can occur because a vnode-backed fault may have
3427                  * to temporarily release the map lock.
3428                  */
3429                 if (src_entry->object.vm_object != NULL) {
3430                         vm_map_split(src_entry);
3431                         src_object = src_entry->object.vm_object;
3432                         dst_entry->object.vm_object = src_object;
3433                         src_entry->eflags |= (MAP_ENTRY_COW |
3434                                               MAP_ENTRY_NEEDS_COPY);
3435                         dst_entry->eflags |= (MAP_ENTRY_COW |
3436                                               MAP_ENTRY_NEEDS_COPY);
3437                         dst_entry->offset = src_entry->offset;
3438                         ++src_map->timestamp;
3439                 } else {
3440                         dst_entry->object.vm_object = NULL;
3441                         dst_entry->offset = 0;
3442                 }
3443
3444                 pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
3445                     dst_entry->end - dst_entry->start, src_entry->start);
3446         } else {
3447                 /*
3448                  * Of course, wired-down pages can't be set copy-on-write.
3449                  * Cause wired pages to be copied into the new map by
3450                  * simulating faults (the new pages are pageable).
3451                  */
3452                 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
3453         }
3454 }
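
/*
 * After the copy both entries reference the same object with
 * MAP_ENTRY_NEEDS_COPY set and the pages write-protected, so the first
 * write fault through either map shadows that entry with a fresh
 * anonymous object and copies only the touched page (classic COW).
 */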
3455
3456 /*
3457  * vmspace_fork:
3458  * Create a new process vmspace structure and vm_map
3459  * based on those of an existing process.  The new map
3460  * is based on the old map, according to the inheritance
3461  * values on the regions in that map.
3462  *
3463  * The source map must not be locked.
3464  * No requirements.
3465  */
3466 static void vmspace_fork_normal_entry(vm_map_t old_map, vm_map_t new_map,
3467                           vm_map_entry_t old_entry, int *countp);
3468 static void vmspace_fork_uksmap_entry(vm_map_t old_map, vm_map_t new_map,
3469                           vm_map_entry_t old_entry, int *countp);
3470
3471 struct vmspace *
3472 vmspace_fork(struct vmspace *vm1)
3473 {
3474         struct vmspace *vm2;
3475         vm_map_t old_map = &vm1->vm_map;
3476         vm_map_t new_map;
3477         vm_map_entry_t old_entry;
3478         int count;
3479
3480         lwkt_gettoken(&vm1->vm_map.token);
3481         vm_map_lock(old_map);
3482
3483         vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset);
3484         lwkt_gettoken(&vm2->vm_map.token);
3485         bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
3486             (caddr_t)&vm1->vm_endcopy - (caddr_t)&vm1->vm_startcopy);
3487         new_map = &vm2->vm_map; /* XXX */
3488         new_map->timestamp = 1;
3489
3490         vm_map_lock(new_map);
3491
3492         count = 0;
3493         old_entry = old_map->header.next;
3494         while (old_entry != &old_map->header) {
3495                 ++count;
3496                 old_entry = old_entry->next;
3497         }
3498
3499         count = vm_map_entry_reserve(count + MAP_RESERVE_COUNT);
3500
3501         old_entry = old_map->header.next;
3502         while (old_entry != &old_map->header) {
3503                 switch(old_entry->maptype) {
3504                 case VM_MAPTYPE_SUBMAP:
3505                         panic("vm_map_fork: encountered a submap");
3506                         break;
3507                 case VM_MAPTYPE_UKSMAP:
3508                         vmspace_fork_uksmap_entry(old_map, new_map,
3509                                                   old_entry, &count);
3510                         break;
3511                 case VM_MAPTYPE_NORMAL:
3512                 case VM_MAPTYPE_VPAGETABLE:
3513                         vmspace_fork_normal_entry(old_map, new_map,
3514                                                   old_entry, &count);
3515                         break;
3516                 }
3517                 old_entry = old_entry->next;
3518         }
3519
3520         new_map->size = old_map->size;
3521         vm_map_unlock(old_map);
3522         vm_map_unlock(new_map);
3523         vm_map_entry_release(count);
3524
3525         lwkt_reltoken(&vm2->vm_map.token);
3526         lwkt_reltoken(&vm1->vm_map.token);
3527
3528         return (vm2);
3529 }
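
/*
 * Sketch of the typical caller (a fork without shared memory; p1/p2 are
 * hypothetical parent and child procs, the real call site lives in the
 * fork path and is not reproduced here):
 *
 *	p2->p_vmspace = vmspace_fork(p1->p_vmspace);
 *
 * Each inheritable entry is either shared (VM_INHERIT_SHARE) or marked
 * copy-on-write (VM_INHERIT_COPY) in the child, as implemented below.
 */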
3530
3531 static
3532 void
3533 vmspace_fork_normal_entry(vm_map_t old_map, vm_map_t new_map,
3534                           vm_map_entry_t old_entry, int *countp)
3535 {
3536         vm_map_entry_t new_entry;
3537         vm_object_t object;
3538
3539         switch (old_entry->inheritance) {
3540         case VM_INHERIT_NONE:
3541                 break;
3542         case VM_INHERIT_SHARE:
3543                 /*
3544                  * Clone the entry, creating the shared object if
3545                  * necessary.
3546                  */
3547                 if (old_entry->object.vm_object == NULL)
3548                         vm_map_entry_allocate_object(old_entry);
3549
3550                 if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
3551                         /*
3552                          * Shadow a map_entry which needs a copy,
3553                          * replacing its object with a new object
3554                          * that points to the old one.  Ask the
3555                          * shadow code to automatically add an
3556                          * additional ref.  We can't do it afterwards
3557                          * because we might race a collapse.  The call
3558                          * to vm_map_entry_shadow() will also clear
3559                          * OBJ_ONEMAPPING.
3560                          */
3561                         vm_map_entry_shadow(old_entry, 1);
3562                 } else if (old_entry->object.vm_object) {
3563                         /*
3564                          * We will make a shared copy of the object,
3565                          * and must clear OBJ_ONEMAPPING.
3566                          *
3567                          * Optimize vnode objects.  OBJ_ONEMAPPING
3568                          * is non-applicable but clear it anyway,
3569                          * and it's terminal so we don't have to deal
3570                          * with chains.  Reduces SMP conflicts.
3571                          *
3572                          * XXX assert that object.vm_object != NULL
3573                          *     since we allocate it above.
3574                          */
3575                         object = old_entry->object.vm_object;
3576                         if (object->type == OBJT_VNODE) {
3577                                 vm_object_reference_quick(object);
3578                                 vm_object_clear_flag(object,
3579                                                      OBJ_ONEMAPPING);
3580                         } else {
3581                                 vm_object_hold(object);
3582                                 vm_object_chain_wait(object, 0);
3583                                 vm_object_reference_locked(object);
3584                                 vm_object_clear_flag(object,
3585                                                      OBJ_ONEMAPPING);
3586                                 vm_object_drop(object);
3587                         }
3588                 }
3589
3590                 /*
3591                  * Clone the entry.  We've already bumped the ref on
3592                  * any vm_object.
3593                  */
3594                 new_entry = vm_map_entry_create(new_map, countp);
3595                 *new_entry = *old_entry;
3596                 new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
3597                 new_entry->wired_count = 0;
3598
3599                 /*
3600                  * Insert the entry into the new map -- we know we're
3601                  * inserting at the end of the new map.
3602                  */
3603
3604                 vm_map_entry_link(new_map, new_map->header.prev,
3605                                   new_entry);
3606
3607                 /*
3608                  * Update the physical map
3609                  */
3610                 pmap_copy(new_map->pmap, old_map->pmap,
3611                           new_entry->start,
3612                           (old_entry->end - old_entry->start),
3613                           old_entry->start);
3614                 break;
3615         case VM_INHERIT_COPY:
3616                 /*
3617                  * Clone the entry and link into the map.
3618                  */
3619                 new_entry = vm_map_entry_create(new_map, countp);
3620                 *new_entry = *old_entry;
3621                 new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
3622                 new_entry->wired_count = 0;
3623                 new_entry->object.vm_object = NULL;
3624                 vm_map_entry_link(new_map, new_map->header.prev,
3625                                   new_entry);
3626                 vm_map_copy_entry(old_map, new_map, old_entry,
3627                                   new_entry);
3628                 break;
3629         }
3630 }
3631
3632 /*
3633  * When forking user-kernel shared maps, the map might change in the
3634  * child so do not try to copy the underlying pmap entries.
3635  */
3636 static
3637 void
3638 vmspace_fork_uksmap_entry(vm_map_t old_map, vm_map_t new_map,
3639                           vm_map_entry_t old_entry, int *countp)
3640 {
3641         vm_map_entry_t new_entry;
3642
3643         new_entry = vm_map_entry_create(new_map, countp);
3644         *new_entry = *old_entry;
3645         new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
3646         new_entry->wired_count = 0;
3647         vm_map_entry_link(new_map, new_map->header.prev,
3648                           new_entry);
3649 }
3650
3651 /*
3652  * Create an auto-grow stack entry
3653  *
3654  * No requirements.
3655  */
3656 int
3657 vm_map_stack (vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
3658               int flags, vm_prot_t prot, vm_prot_t max, int cow)
3659 {
3660         vm_map_entry_t  prev_entry;
3661         vm_map_entry_t  new_stack_entry;
3662         vm_size_t       init_ssize;
3663         int             rv;
3664         int             count;
3665         vm_offset_t     tmpaddr;
3666
3667         cow |= MAP_IS_STACK;
3668
3669         if (max_ssize < sgrowsiz)
3670                 init_ssize = max_ssize;
3671         else
3672                 init_ssize = sgrowsiz;
3673
3674         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
3675         vm_map_lock(map);
3676
3677         /*
3678          * Find space for the mapping
3679          */
3680         if ((flags & (MAP_FIXED | MAP_TRYFIXED)) == 0) {
3681                 if (vm_map_findspace(map, addrbos, max_ssize, 1,
3682                                      flags, &tmpaddr)) {
3683                         vm_map_unlock(map);
3684                         vm_map_entry_release(count);
3685                         return (KERN_NO_SPACE);
3686                 }
3687                 addrbos = tmpaddr;
3688         }
3689
3690         /* If addr is already mapped, no go */
3691         if (vm_map_lookup_entry(map, addrbos, &prev_entry)) {
3692                 vm_map_unlock(map);
3693                 vm_map_entry_release(count);
3694                 return (KERN_NO_SPACE);
3695         }
3696
3697 #if 0
3698         /* XXX already handled by kern_mmap() */
3699         /* If we would blow our VMEM resource limit, no go */
3700         if (map->size + init_ssize >
3701             curproc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
3702                 vm_map_unlock(map);
3703                 vm_map_entry_release(count);
3704                 return (KERN_NO_SPACE);
3705         }
3706 #endif
3707
3708         /*
3709          * If we can't accommodate max_ssize in the current mapping,
3710          * no go.  However, we need to be aware that subsequent user
3711          * mappings might map into the space we have reserved for
3712          * stack, and currently this space is not protected.  
3713          * 
3714          * Hopefully we will at least detect this condition 
3715          * when we try to grow the stack.
3716          */
3717         if ((prev_entry->next != &map->header) &&
3718             (prev_entry->next->start < addrbos + max_ssize)) {
3719                 vm_map_unlock(map);
3720                 vm_map_entry_release(count);
3721                 return (KERN_NO_SPACE);
3722         }
3723
3724         /*
3725          * We initially map a stack of only init_ssize.  We will
3726                  * grow as needed later.  Since this is to be a grow-down
3727                  * stack, we map at the top of the range.
3728          *
3729          * Note: we would normally expect prot and max to be
3730          * VM_PROT_ALL, and cow to be 0.  Possibly we should
3731          * eliminate these as input parameters, and just
3732          * pass these values here in the insert call.
3733          */
3734         rv = vm_map_insert(map, &count, NULL, NULL,
3735                            0, addrbos + max_ssize - init_ssize,
3736                            addrbos + max_ssize,
3737                            VM_MAPTYPE_NORMAL,
3738                            VM_SUBSYS_STACK, prot, max, cow);
3739
3740         /* Now set the avail_ssize amount */
3741         if (rv == KERN_SUCCESS) {
3742                 if (prev_entry != &map->header)
3743                         vm_map_clip_end(map, prev_entry, addrbos + max_ssize - init_ssize, &count);
3744                 new_stack_entry = prev_entry->next;
3745                 if (new_stack_entry->end   != addrbos + max_ssize ||
3746                     new_stack_entry->start != addrbos + max_ssize - init_ssize)
3747                         panic ("Bad entry start/end for new stack entry");
3748                 else 
3749                         new_stack_entry->aux.avail_ssize = max_ssize - init_ssize;
3750         }
3751
3752         vm_map_unlock(map);
3753         vm_map_entry_release(count);
3754         return (rv);
3755 }
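
/*
 * Illustrative call (sketch; 'stack_addr' and 'vm' are hypothetical caller
 * state): create an auto-grow user stack of at most maxssiz bytes,
 * initially backed by only the first sgrowsiz bytes (or less) at the top
 * of the range:
 *
 *	error = vm_map_stack(&vm->vm_map, stack_addr, maxssiz, 0,
 *			     VM_PROT_ALL, VM_PROT_ALL, 0);
 */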
3756
3757 /*
3758  * Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if the
3759  * desired address is already mapped, or if we successfully grow
3760  * the stack.  Also returns KERN_SUCCESS if addr is outside the
3761  * stack range (this is strange, but preserves compatibility with
3762  * the grow function in vm_machdep.c).
3763  *
3764  * No requirements.
3765  */
3766 int
3767 vm_map_growstack (vm_map_t map, vm_offset_t addr)
3768 {
3769         vm_map_entry_t prev_entry;
3770         vm_map_entry_t stack_entry;
3771         vm_map_entry_t new_stack_entry;
3772         struct vmspace *vm;
3773         struct lwp *lp;
3774         struct proc *p;
3775         vm_offset_t    end;
3776         int grow_amount;
3777         int rv = KERN_SUCCESS;
3778         int is_procstack;
3779         int use_read_lock = 1;
3780         int count;
3781
3782         /*
3783          * Find the vm
3784          */
3785         lp = curthread->td_lwp;
3786         p = curthread->td_proc;
3787         KKASSERT(lp != NULL);
3788         vm = lp->lwp_vmspace;
3789
3790         /*
3791          * Growstack is only allowed on the current process.  We disallow
3792          * other use cases, e.g. trying to access memory via procfs that
3793          * the stack hasn't grown into.
3794          */
3795         if (map != &vm->vm_map) {
3796                 return KERN_FAILURE;
3797         }
3798
3799         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
3800 Retry:
3801         if (use_read_lock)
3802                 vm_map_lock_read(map);
3803         else
3804                 vm_map_lock(map);
3805
3806         /* If addr is already in the entry range, no need to grow.*/
3807         if (vm_map_lookup_entry(map, addr, &prev_entry))
3808                 goto done;
3809
3810         if ((stack_entry = prev_entry->next) == &map->header)
3811                 goto done;
3812         if (prev_entry == &map->header) 
3813                 end = stack_entry->start - stack_entry->aux.avail_ssize;
3814         else
3815                 end = prev_entry->end;
3816
3817         /*
3818          * This next test mimics the old grow function in vm_machdep.c.
3819          * It really doesn't quite make sense, but we do it anyway
3820          * for compatibility.
3821          *
3822          * If the stack is not growable, return success.  This signals
3823          * the caller to proceed as it normally would with normal VM.
3824          */
3825         if (stack_entry->aux.avail_ssize < 1 ||
3826             addr >= stack_entry->start ||
3827             addr <  stack_entry->start - stack_entry->aux.avail_ssize) {
3828                 goto done;
3829         } 
3830         
3831         /* Find the minimum grow amount */
3832         grow_amount = roundup (stack_entry->start - addr, PAGE_SIZE);
3833         if (grow_amount > stack_entry->aux.avail_ssize) {
3834                 rv = KERN_NO_SPACE;
3835                 goto done;
3836         }
3837
3838         /*
3839          * If there is no longer enough space between the entries,
3840          * no go; adjust the available space instead.  Note: this
3841          * should only happen if the user has mapped into the
3842          * stack area after the stack was created, and is
3843          * probably an error.
3844          *
3845          * This also effectively destroys any guard page the user
3846          * might have intended by limiting the stack size.
3847          */
3848         if (grow_amount > stack_entry->start - end) {
3849                 if (use_read_lock && vm_map_lock_upgrade(map)) {
3850                         /* lost lock */
3851                         use_read_lock = 0;
3852                         goto Retry;
3853                 }
3854                 use_read_lock = 0;
3855                 stack_entry->aux.avail_ssize = stack_entry->start - end;
3856                 rv = KERN_NO_SPACE;
3857                 goto done;
3858         }
3859
3860         is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr;
3861
3862         /* If this is the main process stack, see if we're over the 
3863          * stack limit.
3864          */
3865         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
3866                              p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
3867                 rv = KERN_NO_SPACE;
3868                 goto done;
3869         }
3870
3871         /* Round up the grow amount modulo SGROWSIZ */
3872         grow_amount = roundup (grow_amount, sgrowsiz);
3873         if (grow_amount > stack_entry->aux.avail_ssize) {
3874                 grow_amount = stack_entry->aux.avail_ssize;
3875         }
3876         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
3877                              p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
3878                 grow_amount = p->p_rlimit[RLIMIT_STACK].rlim_cur -
3879                               ctob(vm->vm_ssize);
3880         }
3881
3882         /* If we would blow our VMEM resource limit, no go */
3883         if (map->size + grow_amount > p->p_rlimit[RLIMIT_VMEM].rlim_cur) {
3884                 rv = KERN_NO_SPACE;
3885                 goto done;
3886         }
3887
3888         if (use_read_lock && vm_map_lock_upgrade(map)) {
3889                 /* lost lock */
3890                 use_read_lock = 0;
3891                 goto Retry;
3892         }
3893         use_read_lock = 0;
3894
3895         /* Get the preliminary new entry start value */
3896         addr = stack_entry->start - grow_amount;
3897
3898         /* If this puts us into the previous entry, cut back our growth
3899          * to the available space.  Also, see the note above.
3900          */
3901         if (addr < end) {
3902                 stack_entry->aux.avail_ssize = stack_entry->start - end;
3903                 addr = end;
3904         }
3905
3906         rv = vm_map_insert(map, &count, NULL, NULL,
3907                            0, addr, stack_entry->start,
3908                            VM_MAPTYPE_NORMAL,
3909                            VM_SUBSYS_STACK, VM_PROT_ALL, VM_PROT_ALL, 0);
3910
3911         /* Adjust the available stack space by the amount we grew. */
3912         if (rv == KERN_SUCCESS) {
3913                 if (prev_entry != &map->header)
3914                         vm_map_clip_end(map, prev_entry, addr, &count);
3915                 new_stack_entry = prev_entry->next;
3916                 if (new_stack_entry->end   != stack_entry->start  ||
3917                     new_stack_entry->start != addr)
3918                         panic ("Bad stack grow start/end in new stack entry");
3919                 else {
3920                         new_stack_entry->aux.avail_ssize =
3921                                 stack_entry->aux.avail_ssize -
3922                                 (new_stack_entry->end - new_stack_entry->start);
3923                         if (is_procstack)
3924                                 vm->vm_ssize += btoc(new_stack_entry->end -
3925                                                      new_stack_entry->start);
3926                 }
3927
3928                 if (map->flags & MAP_WIREFUTURE)
3929                         vm_map_unwire(map, new_stack_entry->start,
3930                                       new_stack_entry->end, FALSE);
3931         }
3932
3933 done:
3934         if (use_read_lock)
3935                 vm_map_unlock_read(map);
3936         else
3937                 vm_map_unlock(map);
3938         vm_map_entry_release(count);
3939         return (rv);
3940 }
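
/*
 * Sketch of the usual caller (the user page-fault path; 'fault_addr' is
 * hypothetical): before resolving a fault below the stack, give the
 * current process's stack a chance to grow over the faulting address:
 *
 *	vm_map_growstack(&curproc->p_vmspace->vm_map, fault_addr);
 *
 * A KERN_SUCCESS return simply means the fault should proceed normally.
 */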
3941
3942 /*
3943  * Unshare the specified VM space for exec.  If other processes are
3944  * mapped to it, then create a new one (empty, or forked from vmcopy).
3945  *
3946  * No requirements.
3947  */
3948 void
3949 vmspace_exec(struct proc *p, struct vmspace *vmcopy) 
3950 {
3951         struct vmspace *oldvmspace = p->p_vmspace;
3952         struct vmspace *newvmspace;
3953         vm_map_t map = &p->p_vmspace->vm_map;
3954
3955         /*
3956          * If we are execing a resident vmspace we fork it, otherwise
3957          * we create a new vmspace.  Note that exitingcnt is not
3958          * copied to the new vmspace.
3959          */
3960         lwkt_gettoken(&oldvmspace->vm_map.token);
3961         if (vmcopy)  {
3962                 newvmspace = vmspace_fork(vmcopy);
3963                 lwkt_gettoken(&newvmspace->vm_map.token);
3964         } else {
3965                 newvmspace = vmspace_alloc(map->min_offset, map->max_offset);
3966                 lwkt_gettoken(&newvmspace->vm_map.token);
3967                 bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy,
3968                       (caddr_t)&oldvmspace->vm_endcopy -
3969                        (caddr_t)&oldvmspace->vm_startcopy);
3970         }
3971
3972         /*
3973          * Finish initializing the vmspace before assigning it
3974          * to the process.  The vmspace will become the current vmspace
3975          * if p == curproc.
3976          */
3977         pmap_pinit2(vmspace_pmap(newvmspace));
3978         pmap_replacevm(p, newvmspace, 0);
3979         lwkt_reltoken(&newvmspace->vm_map.token);
3980         lwkt_reltoken(&oldvmspace->vm_map.token);
3981         vmspace_rel(oldvmspace);
3982 }
3983
3984 /*
3985  * Unshare the specified VM space for forcing COW.  This
3986  * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
3987  */
3988 void
3989 vmspace_unshare(struct proc *p) 
3990 {
3991         struct vmspace *oldvmspace = p->p_vmspace;
3992         struct vmspace *newvmspace;
3993
3994         lwkt_gettoken(&oldvmspace->vm_map.token);
3995         if (vmspace_getrefs(oldvmspace) == 1) {
3996                 lwkt_reltoken(&oldvmspace->vm_map.token);
3997                 return;
3998         }
3999         newvmspace = vmspace_fork(oldvmspace);
4000         lwkt_gettoken(&newvmspace->vm_map.token);
4001         pmap_pinit2(vmspace_pmap(newvmspace));
4002         pmap_replacevm(p, newvmspace, 0);
4003         lwkt_reltoken(&newvmspace->vm_map.token);
4004         lwkt_reltoken(&oldvmspace->vm_map.token);
4005         vmspace_rel(oldvmspace);
4006 }
4007
4008 /*
4009  * vm_map_hint: return the beginning of the best area suitable for
4010  * creating a new mapping with "prot" protection.
4011  *
4012  * No requirements.
4013  */
4014 vm_offset_t
4015 vm_map_hint(struct proc *p, vm_offset_t addr, vm_prot_t prot)
4016 {
4017         struct vmspace *vms = p->p_vmspace;
4018
4019         if (!randomize_mmap || addr != 0) {
4020                 /*
4021                  * Set a reasonable start point for the hint if it was
4022                  * not specified or if it falls within the heap space.
4023                  * Hinted mmap()s do not allocate out of the heap space.
4024                  */
4025                 if (addr == 0 ||
4026                     (addr >= round_page((vm_offset_t)vms->vm_taddr) &&
4027                      addr < round_page((vm_offset_t)vms->vm_daddr + maxdsiz))) {
4028                         addr = round_page((vm_offset_t)vms->vm_daddr + maxdsiz);
4029                 }
4030
4031                 return addr;
4032         }
4033         addr = (vm_offset_t)vms->vm_daddr + MAXDSIZ;
4034         addr += karc4random() & (MIN((256 * 1024 * 1024), MAXDSIZ) - 1);
4035
4036         return (round_page(addr));
4037 }
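
/*
 * Worked example for the randomized case: the base is vm_daddr + MAXDSIZ
 * and the added offset is karc4random() masked to [0, MIN(256MB, MAXDSIZ)),
 * so unhinted mmap()s land at a page-aligned, per-call randomized address
 * above the reserved data segment.
 */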
4038
4039 /*
4040  * Finds the VM object, offset, and protection for a given virtual address
4041  * in the specified map, assuming a page fault of the type specified.
4042  *
4043  * Leaves the map in question locked for read; return values are guaranteed
4044  * until a vm_map_lookup_done call is performed.  Note that the map argument
4045  * is in/out; the returned map must be used in the call to vm_map_lookup_done.
4046  *
4047  * A handle (out_entry) is returned for use in vm_map_lookup_done, to make
4048  * that fast.
4049  *
4050  * If a lookup is requested with "write protection" specified, the map may
4051  * be changed to perform virtual copying operations, although the data
4052  * referenced will remain the same.
4053  *
4054  * No requirements.
4055  */
4056 int
4057 vm_map_lookup(vm_map_t *var_map,                /* IN/OUT */
4058               vm_offset_t vaddr,
4059               vm_prot_t fault_typea,
4060               vm_map_entry_t *out_entry,        /* OUT */
4061               vm_object_t *object,              /* OUT */
4062               vm_pindex_t *pindex,              /* OUT */
4063               vm_prot_t *out_prot,              /* OUT */
4064               boolean_t *wired)                 /* OUT */
4065 {
4066         vm_map_entry_t entry;
4067         vm_map_t map = *var_map;
4068         vm_prot_t prot;
4069         vm_prot_t fault_type = fault_typea;
4070         int use_read_lock = 1;
4071         int rv = KERN_SUCCESS;
4072
4073 RetryLookup:
4074         if (use_read_lock)
4075                 vm_map_lock_read(map);
4076         else
4077                 vm_map_lock(map);
4078
4079         /*
4080          * Always do a full lookup.  The hint doesn't get us much anymore
4081          * now that the map is RB'd.
4082          */
4083         cpu_ccfence();
4084         *out_entry = &map->header;
4085         *object = NULL;
4086
4087         {
4088                 vm_map_entry_t tmp_entry;
4089
4090                 if (!vm_map_lookup_entry(map, vaddr, &tmp_entry)) {
4091                         rv = KERN_INVALID_ADDRESS;
4092                         goto done;
4093                 }
4094                 entry = tmp_entry;
4095                 *out_entry = entry;
4096         }
4097         
4098         /*
4099          * Handle submaps.
4100          */
4101         if (entry->maptype == VM_MAPTYPE_SUBMAP) {
4102                 vm_map_t old_map = map;
4103
4104                 *var_map = map = entry->object.sub_map;
4105                 if (use_read_lock)
4106                         vm_map_unlock_read(old_map);
4107                 else
4108                         vm_map_unlock(old_map);
4109                 use_read_lock = 1;
4110                 goto RetryLookup;
4111         }
4112
4113         /*
4114          * Check whether this task is allowed to have this page.
4115          * Note the special case for MAP_ENTRY_COW pages with an override.
4116          * This is to implement a forced COW for debuggers.
4117          */
4118         if (fault_type & VM_PROT_OVERRIDE_WRITE)
4119                 prot = entry->max_protection;
4120         else
4121                 prot = entry->protection;
4122
4123         fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
4124         if ((fault_type & prot) != fault_type) {
4125                 rv = KERN_PROTECTION_FAILURE;
4126                 goto done;
4127         }
4128
4129         if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
4130             (entry->eflags & MAP_ENTRY_COW) &&
4131             (fault_type & VM_PROT_WRITE) &&
4132             (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) {
4133                 rv = KERN_PROTECTION_FAILURE;
4134                 goto done;
4135         }
4136
4137         /*
4138          * If the entry is wired the page is not pageable; fault it in using
4139          * the full entry protection rather than just the requested access.
4140          */
4141         *wired = (entry->wired_count != 0);
4142         if (*wired)
4143                 prot = fault_type = entry->protection;
4144
4145         /*
4146          * Virtual page tables may need to update the accessed (A) bit
4147          * in a page table entry.  Upgrade the fault to a write fault for
4148          * that case if the map will support it.  If the map does not support
4149          * it the page table entry simply will not be updated.
4150          */
4151         if (entry->maptype == VM_MAPTYPE_VPAGETABLE) {
4152                 if (prot & VM_PROT_WRITE)
4153                         fault_type |= VM_PROT_WRITE;
4154         }
4155
4156         if (curthread->td_lwp && curthread->td_lwp->lwp_vmspace &&
4157             pmap_emulate_ad_bits(&curthread->td_lwp->lwp_vmspace->vm_pmap)) {
4158                 if ((prot & VM_PROT_WRITE) == 0)
4159                         fault_type |= VM_PROT_WRITE;
4160         }
4161
4162         /*
4163          * Only NORMAL and VPAGETABLE maps are object-based.  UKSMAPs are not.
4164          */
4165         if (entry->maptype != VM_MAPTYPE_NORMAL &&
4166             entry->maptype != VM_MAPTYPE_VPAGETABLE) {
4167                 *object = NULL;
4168                 goto skip;
4169         }
4170
4171         /*
4172          * If the entry was copy-on-write, copy now or demote the permissions.
4173          */
4174         if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
4175                 /*
4176                  * If we want to write the page, we may as well handle that
4177                  * now since we've got the map locked.
4178                  *
4179                  * If we don't need to write the page, we just demote the
4180                  * permissions allowed.
4181                  */
4182
4183                 if (fault_type & VM_PROT_WRITE) {
4184                         /*
4185                          * Not allowed if TDF_NOFAULT is set as the shadowing
4186                          * operation can deadlock against the faulting
4187                          * function due to the copy-on-write.
4188                          */
4189                         if (curthread->td_flags & TDF_NOFAULT) {
4190                                 rv = KERN_FAILURE_NOFAULT;
4191                                 goto done;
4192                         }
4193
4194                         /*
4195                          * Make a new object, and place it in the object
4196                          * chain.  Note that no new references have appeared
4197                          * -- one just moved from the map to the new
4198                          * object.
4199                          */
4200
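                        /*
                         * Here and in the object-allocation case below, a
                         * failed read->write lock upgrade means the lock was
                         * lost entirely; restart the lookup and take the map
                         * exclusively.  On success the map is now held
                         * exclusively (use_read_lock is cleared either way).
                         */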
4201                         if (use_read_lock && vm_map_lock_upgrade(map)) {
4202                                 /* lost lock */
4203                                 use_read_lock = 0;
4204                                 goto RetryLookup;
4205                         }
4206                         use_read_lock = 0;
4207
4208                         vm_map_entry_shadow(entry, 0);
4209                 } else {
4210                         /*
4211                          * We're attempting to read a copy-on-write page --
4212                          * don't allow writes.
4213                          */
4214
4215                         prot &= ~VM_PROT_WRITE;
4216                 }
4217         }
4218
4219         /*
4220          * Create an object if necessary.
4221          */
4222         if (entry->object.vm_object == NULL && !map->system_map) {
4223                 if (use_read_lock && vm_map_lock_upgrade(map))  {
4224                         /* lost lock */
4225                         use_read_lock = 0;
4226                         goto RetryLookup;
4227                 }
4228                 use_read_lock = 0;
4229                 vm_map_entry_allocate_object(entry);
4230         }
4231
4232         /*
4233          * Return the object/offset from this entry.  If the entry was
4234          * copy-on-write or empty, it has been fixed up.
4235          */
4236         *object = entry->object.vm_object;
4237
4238 skip:
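        /*
         * The returned pindex is the page index within the object that
         * corresponds to vaddr, with the entry's offset applied.
         */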
4239         *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
4240
4241         /*
4242          * Return the protection computed for this access.  On success we
4243          * return with a read lock held on the map.  On failure we return
4244          * with the map unlocked.
4245          */
4246         *out_prot = prot;
4247 done:
4248         if (rv == KERN_SUCCESS) {
4249                 if (use_read_lock == 0)
4250                         vm_map_lock_downgrade(map);
4251         } else if (use_read_lock) {
4252                 vm_map_unlock_read(map);
4253         } else {
4254                 vm_map_unlock(map);
4255         }
4256         return (rv);
4257 }
4258
4259 /*
4260  * Releases locks acquired by a vm_map_lookup()
4261  * (according to the handle returned by that lookup).
4262  *
4263  * No other requirements.
4264  */
4265 void
4266 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry, int count)
4267 {
4268         /*
4269          * Unlock the main-level map
4270          */
4271         vm_map_unlock_read(map);
4272         if (count)
4273                 vm_map_entry_release(count);
4274 }
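
/*
 * Usage sketch: a minimal, hypothetical fault-path consumer pairing
 * vm_map_lookup() with vm_map_lookup_done().  The function name
 * example_fault() is illustrative only.
 *
 *	int
 *	example_fault(vm_map_t map, vm_offset_t va, vm_prot_t fault_type)
 *	{
 *		vm_map_entry_t entry;
 *		vm_object_t object;
 *		vm_pindex_t pindex;
 *		vm_prot_t prot;
 *		boolean_t wired;
 *		int rv;
 *
 *		rv = vm_map_lookup(&map, va, fault_type, &entry,
 *				   &object, &pindex, &prot, &wired);
 *		if (rv != KERN_SUCCESS)
 *			return (rv);
 *		... use object and pindex while the read lock is held ...
 *		vm_map_lookup_done(map, entry, 0);
 *		return (KERN_SUCCESS);
 *	}
 */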
4275
4276 /*
4277  * Quick hack, needs some help to make it more SMP friendly.
4278  */
4279 void
4280 vm_map_interlock(vm_map_t map, struct vm_map_ilock *ilock,
4281                  vm_offset_t ran_beg, vm_offset_t ran_end)
4282 {
4283         struct vm_map_ilock *scan;
4284
4285         ilock->ran_beg = ran_beg;
4286         ilock->ran_end = ran_end;
4287         ilock->flags = 0;
4288
4289         spin_lock(&map->ilock_spin);
4290 restart:
4291         for (scan = map->ilock_base; scan; scan = scan->next) {
4292                 if (ran_end > scan->ran_beg && ran_beg < scan->ran_end) {
4293                         scan->flags |= ILOCK_WAITING;
4294                         ssleep(scan, &map->ilock_spin, 0, "ilock", 0);
4295                         goto restart;
4296                 }
4297         }
4298         ilock->next = map->ilock_base;
4299         map->ilock_base = ilock;
4300         spin_unlock(&map->ilock_spin);
4301 }
4302
4303 void
4304 vm_map_deinterlock(vm_map_t map, struct vm_map_ilock *ilock)
4305 {
4306         struct vm_map_ilock *scan;
4307         struct vm_map_ilock **scanp;
4308
4309         spin_lock(&map->ilock_spin);
4310         scanp = &map->ilock_base;
4311         while ((scan = *scanp) != NULL) {
4312                 if (scan == ilock) {
4313                         *scanp = ilock->next;
4314                         spin_unlock(&map->ilock_spin);
4315                         if (ilock->flags & ILOCK_WAITING)
4316                                 wakeup(ilock);
4317                         return;
4318                 }
4319                 scanp = &scan->next;
4320         }
4321         spin_unlock(&map->ilock_spin);
4322         panic("vm_map_deinterlock: missing ilock!");
4323 }
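
/*
 * Usage sketch (illustrative only): vm_map_interlock() sleeps until no
 * installed interlock overlaps [ran_beg, ran_end) and then records the
 * range; vm_map_deinterlock() removes it and wakes any waiters.  The two
 * calls bracket the work on the range:
 *
 *	struct vm_map_ilock ilock;
 *
 *	vm_map_interlock(map, &ilock, start, end);
 *	... operate on the address range [start, end) ...
 *	vm_map_deinterlock(map, &ilock);
 */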
4324
4325 #include "opt_ddb.h"
4326 #ifdef DDB
4327 #include <ddb/ddb.h>
4328
4329 /*
4330  * Debugging only
4331  */
4332 DB_SHOW_COMMAND(map, vm_map_print)
4333 {
4334         static int nlines;
4335         /* XXX convert args. */
4336         vm_map_t map = (vm_map_t)addr;
4337         boolean_t full = have_addr;
4338
4339         vm_map_entry_t entry;
4340
4341         db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
4342             (void *)map,
4343             (void *)map->pmap, map->nentries, map->timestamp);
4344         nlines++;
4345
4346         if (!full && db_indent)
4347                 return;
4348
4349         db_indent += 2;
4350         for (entry = map->header.next; entry != &map->header;
4351             entry = entry->next) {
4352                 db_iprintf("map entry %p: start=%p, end=%p\n",
4353                     (void *)entry, (void *)entry->start, (void *)entry->end);
4354                 nlines++;
4355                 {
4356                         static char *inheritance_name[4] =
4357                         {"share", "copy", "none", "donate_copy"};
4358
4359                         db_iprintf(" prot=%x/%x/%s",
4360                             entry->protection,
4361                             entry->max_protection,
4362                             inheritance_name[(int)(unsigned char)
4363                                                 entry->inheritance]);
4364                         if (entry->wired_count != 0)
4365                                 db_printf(", wired");
4366                 }
4367                 switch(entry->maptype) {
4368                 case VM_MAPTYPE_SUBMAP:
4369                         /* XXX no %qd in kernel.  Truncate entry->offset. */
4370                         db_printf(", share=%p, offset=0x%lx\n",
4371                             (void *)entry->object.sub_map,
4372                             (long)entry->offset);
4373                         nlines++;
4374                         if ((entry->prev == &map->header) ||
4375                             (entry->prev->object.sub_map !=
4376                                 entry->object.sub_map)) {
4377                                 db_indent += 2;
4378                                 vm_map_print((db_expr_t)(intptr_t)
4379                                              entry->object.sub_map,
4380                                              full, 0, NULL);
4381                                 db_indent -= 2;
4382                         }
4383                         break;
4384                 case VM_MAPTYPE_NORMAL:
4385                 case VM_MAPTYPE_VPAGETABLE:
4386                         /* XXX no %qd in kernel.  Truncate entry->offset. */
4387                         db_printf(", object=%p, offset=0x%lx",
4388                             (void *)entry->object.vm_object,
4389                             (long)entry->offset);
4390                         if (entry->eflags & MAP_ENTRY_COW)
4391                                 db_printf(", copy (%s)",
4392                                     (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
4393                         db_printf("\n");
4394                         nlines++;
4395
4396                         if ((entry->prev == &map->header) ||
4397                             (entry->prev->object.vm_object !=
4398                                 entry->object.vm_object)) {
4399                                 db_indent += 2;
4400                                 vm_object_print((db_expr_t)(intptr_t)
4401                                                 entry->object.vm_object,
4402                                                 full, 0, NULL);
4403                                 nlines += 4;
4404                                 db_indent -= 2;
4405                         }
4406                         break;
4407                 case VM_MAPTYPE_UKSMAP:
4408                         db_printf(", uksmap=%p, offset=0x%lx",
4409                             (void *)entry->object.uksmap,
4410                             (long)entry->offset);
4411                         if (entry->eflags & MAP_ENTRY_COW)
4412                                 db_printf(", copy (%s)",
4413                                     (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
4414                         db_printf("\n");
4415                         nlines++;
4416                         break;
4417                 default:
4418                         break;
4419                 }
4420         }
4421         db_indent -= 2;
4422         if (db_indent == 0)
4423                 nlines = 0;
4424 }
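
/*
 * Note: DB_SHOW_COMMAND(map, ...) is normally reached from the ddb prompt
 * as "show map <address>"; the address argument is taken as the vm_map to
 * print and also enables the full listing (full = have_addr above).
 */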
4425
4426 /*
4427  * Debugging only
4428  */
4429 DB_SHOW_COMMAND(procvm, procvm)
4430 {
4431         struct proc *p;
4432
4433         if (have_addr) {
4434                 p = (struct proc *) addr;
4435         } else {
4436                 p = curproc;
4437         }
4438
4439         db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
4440             (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
4441             (void *)vmspace_pmap(p->p_vmspace));
4442
4443         vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL);
4444 }
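
/*
 * Note: "show procvm [<proc address>]" from the ddb prompt dumps the
 * vmspace and map of the given process (curproc when no address is
 * given) by way of vm_map_print() above.
 */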
4445
4446 #endif /* DDB */