Make adjustments to how MAP_STACK works to prevent improper mmap()s.
[dragonfly.git] sys/vm/vm_map.c
1 /*
2  * Copyright (c) 1991, 1993
3  *      The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * The Mach Operating System project at Carnegie-Mellon University.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *      This product includes software developed by the University of
19  *      California, Berkeley and its contributors.
20  * 4. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *      from: @(#)vm_map.c      8.3 (Berkeley) 1/12/94
37  *
38  *
39  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40  * All rights reserved.
41  *
42  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
43  *
44  * Permission to use, copy, modify and distribute this software and
45  * its documentation is hereby granted, provided that both the copyright
46  * notice and this permission notice appear in all copies of the
47  * software, derivative works or modified versions, and any portions
48  * thereof, and that both notices appear in supporting documentation.
49  *
50  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53  *
54  * Carnegie Mellon requests users of this software to return to
55  *
56  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
57  *  School of Computer Science
58  *  Carnegie Mellon University
59  *  Pittsburgh PA 15213-3890
60  *
61  * any improvements or extensions that they make and grant Carnegie the
62  * rights to redistribute these changes.
63  *
64  * $FreeBSD: src/sys/vm/vm_map.c,v 1.187.2.19 2003/05/27 00:47:02 alc Exp $
65  * $DragonFly: src/sys/vm/vm_map.c,v 1.56 2007/04/29 18:25:41 dillon Exp $
66  */
67
68 /*
69  *      Virtual memory mapping module.
70  */
71
72 #include <sys/param.h>
73 #include <sys/systm.h>
74 #include <sys/kernel.h>
75 #include <sys/proc.h>
76 #include <sys/lock.h>
77 #include <sys/vmmeter.h>
78 #include <sys/mman.h>
79 #include <sys/vnode.h>
80 #include <sys/resourcevar.h>
81 #include <sys/shm.h>
82 #include <sys/tree.h>
83 #include <sys/malloc.h>
84
85 #include <vm/vm.h>
86 #include <vm/vm_param.h>
87 #include <vm/pmap.h>
88 #include <vm/vm_map.h>
89 #include <vm/vm_page.h>
90 #include <vm/vm_object.h>
91 #include <vm/vm_pager.h>
92 #include <vm/vm_kern.h>
93 #include <vm/vm_extern.h>
94 #include <vm/swap_pager.h>
95 #include <vm/vm_zone.h>
96
97 #include <sys/thread2.h>
98 #include <sys/sysref2.h>
99
100 /*
101  *      Virtual memory maps provide for the mapping, protection,
102  *      and sharing of virtual memory objects.  In addition,
103  *      this module provides for an efficient virtual copy of
104  *      memory from one map to another.
105  *
106  *      Synchronization is required prior to most operations.
107  *
108  *      Maps consist of an ordered doubly-linked list of simple
109  *      entries; a single hint is used to speed up lookups.
110  *
111  *      Since portions of maps are specified by start/end addresses,
112  *      which may not align with existing map entries, all
113  *      routines merely "clip" entries to these start/end values.
114  *      [That is, an entry is split into two, bordering at a
115  *      start or end value.]  Note that these clippings may not
116  *      always be necessary (as the two resulting entries are then
117  *      not changed); however, the clipping is done for convenience.
118  *
119  *      As mentioned above, virtual copy operations are performed
120  *      by copying VM object references from one map to
121  *      another, and then marking both regions as copy-on-write.
122  */
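/*
 * For example, clipping an entry covering [0x1000, 0x5000) at 0x3000
 * yields two adjacent entries, [0x1000, 0x3000) and [0x3000, 0x5000),
 * both referencing the same backing object at suitably adjusted
 * offsets.
 */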
123
124 static void vmspace_terminate(struct vmspace *vm);
125 static void vmspace_dtor(void *obj, void *private);
126
127 MALLOC_DEFINE(M_VMSPACE, "vmspace", "vmspace objcache backingstore");
128
129 struct sysref_class vmspace_sysref_class = {
130         .name =         "vmspace",
131         .mtype =        M_VMSPACE,
132         .proto =        SYSREF_PROTO_VMSPACE,
133         .offset =       offsetof(struct vmspace, vm_sysref),
134         .objsize =      sizeof(struct vmspace),
135         .mag_capacity = 32,
136         .flags = SRC_MANAGEDINIT,
137         .dtor = vmspace_dtor,
138         .ops = {
139                 .terminate = (sysref_terminate_func_t)vmspace_terminate
140         }
141 };
142
143 #define VMEPERCPU       2
144
145 static struct vm_zone mapentzone_store, mapzone_store;
146 static vm_zone_t mapentzone, mapzone;
147 static struct vm_object mapentobj, mapobj;
148
149 static struct vm_map_entry map_entry_init[MAX_MAPENT];
150 static struct vm_map_entry cpu_map_entry_init[MAXCPU][VMEPERCPU];
151 static struct vm_map map_init[MAX_KMAP];
152
153 static void vm_map_entry_shadow(vm_map_entry_t entry);
154 static vm_map_entry_t vm_map_entry_create(vm_map_t map, int *);
155 static void vm_map_entry_dispose (vm_map_t map, vm_map_entry_t entry, int *);
156 static void _vm_map_clip_end (vm_map_t, vm_map_entry_t, vm_offset_t, int *);
157 static void _vm_map_clip_start (vm_map_t, vm_map_entry_t, vm_offset_t, int *);
158 static void vm_map_entry_delete (vm_map_t, vm_map_entry_t, int *);
159 static void vm_map_entry_unwire (vm_map_t, vm_map_entry_t);
160 static void vm_map_copy_entry (vm_map_t, vm_map_t, vm_map_entry_t,
161                 vm_map_entry_t);
162 static void vm_map_split (vm_map_entry_t);
163 static void vm_map_unclip_range (vm_map_t map, vm_map_entry_t start_entry, vm_offset_t start, vm_offset_t end, int *count, int flags);
164
165 /*
166  *      vm_map_startup:
167  *
168  *      Initialize the vm_map module.  Must be called before
169  *      any other vm_map routines.
170  *
171  *      Map and entry structures are allocated from the general
172  *      purpose memory pool with some exceptions:
173  *
174  *      - The kernel map and kmem submap are allocated statically.
175  *      - Kernel map entries are allocated out of a static pool.
176  *
177  *      These restrictions are necessary since malloc() uses the
178  *      maps and requires map entries.
179  */
180 void
181 vm_map_startup(void)
182 {
183         mapzone = &mapzone_store;
184         zbootinit(mapzone, "MAP", sizeof (struct vm_map),
185                 map_init, MAX_KMAP);
186         mapentzone = &mapentzone_store;
187         zbootinit(mapentzone, "MAP ENTRY", sizeof (struct vm_map_entry),
188                 map_entry_init, MAX_MAPENT);
189 }
190
191 /*
192  *      vm_init2 - called prior to any vmspace allocations
193  */
194 void
195 vm_init2(void) 
196 {
197         zinitna(mapentzone, &mapentobj, NULL, 0, 0, 
198                 ZONE_USE_RESERVE | ZONE_SPECIAL, 1);
199         zinitna(mapzone, &mapobj, NULL, 0, 0, 0, 1);
200         pmap_init2();
201         vm_object_init2();
202 }
203
204
205 /*
206  * Red black tree functions
207  */
208 static int rb_vm_map_compare(vm_map_entry_t a, vm_map_entry_t b);
209 RB_GENERATE(vm_map_rb_tree, vm_map_entry, rb_entry, rb_vm_map_compare);
210
211 /* a->start is the address; it is the only field that has to be initialized */
212 static int
213 rb_vm_map_compare(vm_map_entry_t a, vm_map_entry_t b)
214 {
215         if (a->start < b->start)
216                 return(-1);
217         else if (a->start > b->start)
218                 return(1);
219         return(0);
220 }
221
222 /*
223  * Allocate a vmspace structure, including a vm_map and pmap.
224  * Initialize numerous fields.  While the initial allocation is zeroed,
225  * subsequent reuse from the objcache leaves elements of the structure
226  * intact (particularly the pmap), so portions must be zeroed.
227  *
228  * The structure is not considered activated until we call sysref_activate().
229  */
230 struct vmspace *
231 vmspace_alloc(vm_offset_t min, vm_offset_t max)
232 {
233         struct vmspace *vm;
234
235         vm = sysref_alloc(&vmspace_sysref_class);
236         bzero(&vm->vm_startcopy,
237               (char *)&vm->vm_endcopy - (char *)&vm->vm_startcopy);
238         vm_map_init(&vm->vm_map, min, max, NULL);
239         pmap_pinit(vmspace_pmap(vm));           /* (some fields reused) */
240         vm->vm_map.pmap = vmspace_pmap(vm);             /* XXX */
241         vm->vm_shm = NULL;
242         vm->vm_exitingcnt = 0;
243         cpu_vmspace_alloc(vm);
244         sysref_activate(&vm->vm_sysref);
245         return (vm);
246 }
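
#if 0
/*
 * Usage sketch (illustrative only; the example_vmspace_create() helper
 * is hypothetical): a fresh vmspace is normally created with the user
 * address bounds and becomes visible once sysref_activate() has run
 * inside vmspace_alloc().
 */
static struct vmspace *
example_vmspace_create(void)
{
        return (vmspace_alloc(VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS));
}
#endif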
247
248 /*
249  * dtor function - Some elements of the pmap are retained in the
250  * free-cached vmspaces to improve performance.  We have to clean them up
251  * here before returning the vmspace to the memory pool.
252  */
253 static void
254 vmspace_dtor(void *obj, void *private)
255 {
256         struct vmspace *vm = obj;
257
258         pmap_puninit(vmspace_pmap(vm));
259 }
260
261 /*
262  * Called in two cases: 
263  *
264  * (1) When the last sysref is dropped, but exitingcnt might still be
265  *     non-zero.
266  *
267  * (2) When there are no sysrefs left (i.e. refcnt is negative) and the
268  *     exitingcnt becomes zero.
269  *
270  * sysref will not scrap the object until we call sysref_put() once more
271  * after the last ref has been dropped.
272  */
273 static void
274 vmspace_terminate(struct vmspace *vm)
275 {
276         int count;
277
278         /*
279          * If exitingcnt is non-zero we can't get rid of the entire vmspace
280          * yet, but we can scrap user memory.
281          */
282         if (vm->vm_exitingcnt) {
283                 shmexit(vm);
284                 pmap_remove_pages(vmspace_pmap(vm), VM_MIN_USER_ADDRESS,
285                                   VM_MAX_USER_ADDRESS);
286                 vm_map_remove(&vm->vm_map, VM_MIN_USER_ADDRESS,
287                               VM_MAX_USER_ADDRESS);
288
289                 return;
290         }
291         cpu_vmspace_free(vm);
292
293         /*
294  * Make sure any SysV shm is freed; it might not have been
295  * freed in exit1().
296          */
297         shmexit(vm);
298
299         KKASSERT(vm->vm_upcalls == NULL);
300
301         /*
302          * Lock the map, to wait out all other references to it.
303          * Delete all of the mappings and pages they hold, then call
304          * the pmap module to reclaim anything left.
305          */
306         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
307         vm_map_lock(&vm->vm_map);
308         vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
309                 vm->vm_map.max_offset, &count);
310         vm_map_unlock(&vm->vm_map);
311         vm_map_entry_release(count);
312
313         pmap_release(vmspace_pmap(vm));
314         sysref_put(&vm->vm_sysref);
315 }
316
317 /*
318  * This is called in the wait*() handling code.  The vmspace can be terminated
319  * after the last wait is finished using it.
320  */
321 void
322 vmspace_exitfree(struct proc *p)
323 {
324         struct vmspace *vm;
325
326         vm = p->p_vmspace;
327         p->p_vmspace = NULL;
328
329         if (--vm->vm_exitingcnt == 0 && sysref_isinactive(&vm->vm_sysref))
330                 vmspace_terminate(vm);
331 }
332
333 /*
334  * vmspace_swap_count()
335  *
336  *      Swap usage is determined by taking the proportional swap used by
337  *      VM objects backing the VM map.  To make up for fractional losses,
338  *      if the VM object has any swap use at all the associated map entries
339  *      count for at least 1 swap page.
340  */
341 int
342 vmspace_swap_count(struct vmspace *vmspace)
343 {
344         vm_map_t map = &vmspace->vm_map;
345         vm_map_entry_t cur;
346         vm_object_t object;
347         int count = 0;
348         int n;
349
350         for (cur = map->header.next; cur != &map->header; cur = cur->next) {
351                 switch(cur->maptype) {
352                 case VM_MAPTYPE_NORMAL:
353                 case VM_MAPTYPE_VPAGETABLE:
354                         if ((object = cur->object.vm_object) == NULL)
355                                 break;
356                         if (object->type != OBJT_SWAP)
357                                 break;
358                         n = (cur->end - cur->start) / PAGE_SIZE;
359                         if (object->un_pager.swp.swp_bcount) {
360                                 count += object->un_pager.swp.swp_bcount *
361                                     SWAP_META_PAGES * n / object->size + 1;
362                         }
363                         break;
364                 default:
365                         break;
366                 }
367         }
368         return(count);
369 }
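
/*
 * Worked example: an OBJT_SWAP object of size 1000 pages with
 * swp_bcount == 4, mapped by a 250-page entry, charges that entry
 * 4 * SWAP_META_PAGES * 250 / 1000 + 1 == SWAP_META_PAGES + 1 swap
 * pages; the "+ 1" implements the at-least-one-page rule above.
 */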
370
371 /*
372  * vmspace_anonymous_count()
373  *
374  *      Calculate the approximate number of anonymous pages in use by
375  *      this vmspace.  To make up for fractional losses, we count each
376  *      VM object as having at least 1 anonymous page.
377  */
378 int
379 vmspace_anonymous_count(struct vmspace *vmspace)
380 {
381         vm_map_t map = &vmspace->vm_map;
382         vm_map_entry_t cur;
383         vm_object_t object;
384         int count = 0;
385
386         for (cur = map->header.next; cur != &map->header; cur = cur->next) {
387                 switch(cur->maptype) {
388                 case VM_MAPTYPE_NORMAL:
389                 case VM_MAPTYPE_VPAGETABLE:
390                         if ((object = cur->object.vm_object) == NULL)
391                                 break;
392                         if (object->type != OBJT_DEFAULT &&
393                             object->type != OBJT_SWAP) {
394                                 break;
395                         }
396                         count += object->resident_page_count;
397                         break;
398                 default:
399                         break;
400                 }
401         }
402         return(count);
403 }
404
405
406
407
408 /*
409  *      vm_map_create:
410  *
411  *      Creates and returns a new empty VM map with
412  *      the given physical map structure, and having
413  *      the given lower and upper address bounds.
414  */
415 vm_map_t
416 vm_map_create(vm_map_t result, pmap_t pmap, vm_offset_t min, vm_offset_t max)
417 {
418         if (result == NULL)
419                 result = zalloc(mapzone);
420         vm_map_init(result, min, max, pmap);
421         return (result);
422 }
423
424 /*
425  * Initialize an existing vm_map structure
426  * such as that in the vmspace structure.
427  * The pmap is set elsewhere.
428  */
429 void
430 vm_map_init(struct vm_map *map, vm_offset_t min, vm_offset_t max, pmap_t pmap)
431 {
432         map->header.next = map->header.prev = &map->header;
433         RB_INIT(&map->rb_root);
434         map->nentries = 0;
435         map->size = 0;
436         map->system_map = 0;
437         map->infork = 0;
438         map->min_offset = min;
439         map->max_offset = max;
440         map->pmap = pmap;
441         map->first_free = &map->header;
442         map->hint = &map->header;
443         map->timestamp = 0;
444         lockinit(&map->lock, "thrd_sleep", 0, 0);
445 }
446
447 /*
448  * Shadow the vm_map_entry's object.  This typically needs to be done when
449  * a write fault is taken on an entry which had previously been cloned by
450  * fork().  The shared object (which might be NULL) must become private so
451  * we add a shadow layer above it.
452  *
453  * Object allocation for anonymous mappings is deferred as long as possible.
454  * When creating a shadow, however, the underlying object must be instantiated
455  * so it can be shared.
456  *
457  * If the map segment is governed by a virtual page table then it is
458  * possible to address offsets beyond the mapped area.  Just allocate
459  * a maximally sized object for this case.
460  */
461 static
462 void
463 vm_map_entry_shadow(vm_map_entry_t entry)
464 {
465         if (entry->maptype == VM_MAPTYPE_VPAGETABLE) {
466                 vm_object_shadow(&entry->object.vm_object, &entry->offset,
467                                  0x7FFFFFFF);   /* XXX */
468         } else {
469                 vm_object_shadow(&entry->object.vm_object, &entry->offset,
470                                  atop(entry->end - entry->start));
471         }
472         entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
473 }
474
475 /*
476  * Allocate an object for a vm_map_entry.
477  *
478  * Object allocation for anonymous mappings is deferred as long as possible.
479  * This function is called when we can defer no longer, generally when a map
480  * entry might be split or forked or takes a page fault.
481  *
482  * If the map segment is governed by a virtual page table then it is
483  * possible to address offsets beyond the mapped area.  Just allocate
484  * a maximally sized object for this case.
485  */
486 void 
487 vm_map_entry_allocate_object(vm_map_entry_t entry)
488 {
489         vm_object_t obj;
490
491         if (entry->maptype == VM_MAPTYPE_VPAGETABLE) {
492                 obj = vm_object_allocate(OBJT_DEFAULT, 0x7FFFFFFF); /* XXX */
493         } else {
494                 obj = vm_object_allocate(OBJT_DEFAULT,
495                                          atop(entry->end - entry->start));
496         }
497         entry->object.vm_object = obj;
498         entry->offset = 0;
499 }
500
501 /*
502  *      vm_map_entry_reserve_cpu_init:
503  *
504  *      Set an initial negative count so the first attempt to reserve
505  *      space preloads a bunch of vm_map_entry's for this cpu.  Also
506  *      pre-allocate 2 vm_map_entries which will be needed by zalloc() to
507  *      map a new page for vm_map_entry structures.  SMP systems are
508  *      particularly sensitive.
509  *
510  *      This routine is called in early boot so we cannot just call
511  *      vm_map_entry_reserve().
512  *
513  *      May be called for a gd other than mycpu, but may only be called
514  *      during early boot.
515  */
516 void
517 vm_map_entry_reserve_cpu_init(globaldata_t gd)
518 {
519         vm_map_entry_t entry;
520         int i;
521
522         gd->gd_vme_avail -= MAP_RESERVE_COUNT * 2;
523         entry = &cpu_map_entry_init[gd->gd_cpuid][0];
524         for (i = 0; i < VMEPERCPU; ++i, ++entry) {
525                 entry->next = gd->gd_vme_base;
526                 gd->gd_vme_base = entry;
527         }
528 }
529
530 /*
531  *      vm_map_entry_reserve:
532  *
533  *      Reserves vm_map_entry structures so code later on can manipulate
534  *      map_entry structures within a locked map without blocking trying
535  *      to allocate a new vm_map_entry.
536  */
537 int
538 vm_map_entry_reserve(int count)
539 {
540         struct globaldata *gd = mycpu;
541         vm_map_entry_t entry;
542
543         crit_enter();
544
545         /*
546          * Make sure we have enough structures in gd_vme_base to handle
547          * the reservation request.
548          */
549         while (gd->gd_vme_avail < count) {
550                 entry = zalloc(mapentzone);
551                 entry->next = gd->gd_vme_base;
552                 gd->gd_vme_base = entry;
553                 ++gd->gd_vme_avail;
554         }
555         gd->gd_vme_avail -= count;
556         crit_exit();
557         return(count);
558 }
559
560 /*
561  *      vm_map_entry_release:
562  *
563  *      Releases previously reserved vm_map_entry structures that were not
564  *      used.  If we have too much junk in our per-cpu cache clean some of
565  *      it out.
566  */
567 void
568 vm_map_entry_release(int count)
569 {
570         struct globaldata *gd = mycpu;
571         vm_map_entry_t entry;
572
573         crit_enter();
574         gd->gd_vme_avail += count;
575         while (gd->gd_vme_avail > MAP_RESERVE_SLOP) {
576                 entry = gd->gd_vme_base;
577                 KKASSERT(entry != NULL);
578                 gd->gd_vme_base = entry->next;
579                 --gd->gd_vme_avail;
580                 crit_exit();
581                 zfree(mapentzone, entry);
582                 crit_enter();
583         }
584         crit_exit();
585 }
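
#if 0
/*
 * Typical reservation pattern (sketch only; example_map_op() is
 * hypothetical): reserve entries before taking the map lock so that
 * entry manipulation cannot block in zalloc(), then release whatever
 * was not consumed.
 */
static void
example_map_op(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
        int count;

        count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
        vm_map_lock(map);
        vm_map_delete(map, start, end, &count);
        vm_map_unlock(map);
        vm_map_entry_release(count);
}
#endif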
586
587 /*
588  *      vm_map_entry_kreserve:
589  *
590  *      Reserve map entry structures for use in kernel_map itself.  These
591  *      entries have *ALREADY* been reserved on a per-cpu basis when the map
592  *      was inited.  This function is used by zalloc() to avoid a recursion
593  *      when zalloc() itself needs to allocate additional kernel memory.
594  *
595  *      This function works like the normal reserve but does not load the
596  *      vm_map_entry cache (because that would result in an infinite
597  *      recursion).  Note that gd_vme_avail may go negative.  This is expected.
598  *
599  *      Any caller of this function must be sure to renormalize after 
600  *      potentially eating entries to ensure that the reserve supply
601  *      remains intact.
602  */
603 int
604 vm_map_entry_kreserve(int count)
605 {
606         struct globaldata *gd = mycpu;
607
608         crit_enter();
609         gd->gd_vme_avail -= count;
610         crit_exit();
611         KASSERT(gd->gd_vme_base != NULL, ("no reserved entries left, gd_vme_avail = %d\n", gd->gd_vme_avail));
612         return(count);
613 }
614
615 /*
616  *      vm_map_entry_krelease:
617  *
618  *      Release previously reserved map entries for kernel_map.  We do not
619  *      attempt to clean up like the normal release function as this would
620  *      cause an unnecessary (but probably not fatal) deep procedure call.
621  */
622 void
623 vm_map_entry_krelease(int count)
624 {
625         struct globaldata *gd = mycpu;
626
627         crit_enter();
628         gd->gd_vme_avail += count;
629         crit_exit();
630 }
631
632 /*
633  *      vm_map_entry_create:    [ internal use only ]
634  *
635  *      Allocates a VM map entry for insertion.  No entry fields are filled 
636  *      in.
637  *
638  *      This routine may be called from an interrupt thread but not a FAST
639  *      interrupt.  This routine may recurse the map lock.
640  */
641 static vm_map_entry_t
642 vm_map_entry_create(vm_map_t map, int *countp)
643 {
644         struct globaldata *gd = mycpu;
645         vm_map_entry_t entry;
646
647         KKASSERT(*countp > 0);
648         --*countp;
649         crit_enter();
650         entry = gd->gd_vme_base;
651         KASSERT(entry != NULL, ("gd_vme_base NULL! count %d", *countp));
652         gd->gd_vme_base = entry->next;
653         crit_exit();
654         return(entry);
655 }
656
657 /*
658  *      vm_map_entry_dispose:   [ internal use only ]
659  *
660  *      Dispose of a vm_map_entry that is no longer being referenced.  This
661  *      function may be called from an interrupt.
662  */
663 static void
664 vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry, int *countp)
665 {
666         struct globaldata *gd = mycpu;
667
668         KKASSERT(map->hint != entry);
669         KKASSERT(map->first_free != entry);
670
671         ++*countp;
672         crit_enter();
673         entry->next = gd->gd_vme_base;
674         gd->gd_vme_base = entry;
675         crit_exit();
676 }
677
678
679 /*
680  *      vm_map_entry_{un,}link:
681  *
682  *      Insert/remove entries from maps.
683  */
684 static __inline void
685 vm_map_entry_link(vm_map_t map,
686                   vm_map_entry_t after_where,
687                   vm_map_entry_t entry)
688 {
689         map->nentries++;
690         entry->prev = after_where;
691         entry->next = after_where->next;
692         entry->next->prev = entry;
693         after_where->next = entry;
694         if (vm_map_rb_tree_RB_INSERT(&map->rb_root, entry))
695                 panic("vm_map_entry_link: dup addr map %p ent %p", map, entry);
696 }
697
698 static __inline void
699 vm_map_entry_unlink(vm_map_t map,
700                     vm_map_entry_t entry)
701 {
702         vm_map_entry_t prev;
703         vm_map_entry_t next;
704
705         if (entry->eflags & MAP_ENTRY_IN_TRANSITION)
706                 panic("vm_map_entry_unlink: attempt to mess with locked entry! %p", entry);
707         prev = entry->prev;
708         next = entry->next;
709         next->prev = prev;
710         prev->next = next;
711         vm_map_rb_tree_RB_REMOVE(&map->rb_root, entry);
712         map->nentries--;
713 }
714
715 /*
716  *      vm_map_lookup_entry:    [ internal use only ]
717  *
718  *      Finds the map entry containing (or
719  *      immediately preceding) the specified address
720  *      in the given map; the entry is returned
721  *      in the "entry" parameter.  The boolean
722  *      result indicates whether the address is
723  *      actually contained in the map.
724  */
725 boolean_t
726 vm_map_lookup_entry(vm_map_t map, vm_offset_t address,
727     vm_map_entry_t *entry /* OUT */)
728 {
729         vm_map_entry_t tmp;
730         vm_map_entry_t last;
731
732 #if 0
733         /*
734          * XXX TEMPORARILY DISABLED.  For some reason our attempt to revive
735          * the hint code with the red-black lookup meets with system crashes
736          * and lockups.  We do not yet know why.
737          *
738          * It is possible that the problem is related to the setting
739          * of the hint during map_entry deletion, in the code specified
740          * at the GGG comment later on in this file.
741          */
742         /*
743          * Quickly check the cached hint, there's a good chance of a match.
744          */
745         if (map->hint != &map->header) {
746                 tmp = map->hint;
747                 if (address >= tmp->start && address < tmp->end) {
748                         *entry = tmp;
749                         return(TRUE);
750                 }
751         }
752 #endif
753
754         /*
755          * Locate the record from the top of the tree.  'last' tracks the
756          * closest prior record and is returned if no match is found, which
757          * in binary tree terms means tracking the most recent right-branch
758          * taken.  If there is no prior record, &map->header is returned.
759          */
760         last = &map->header;
761         tmp = RB_ROOT(&map->rb_root);
762
763         while (tmp) {
764                 if (address >= tmp->start) {
765                         if (address < tmp->end) {
766                                 *entry = tmp;
767                                 map->hint = tmp;
768                                 return(TRUE);
769                         }
770                         last = tmp;
771                         tmp = RB_RIGHT(tmp, rb_entry);
772                 } else {
773                         tmp = RB_LEFT(tmp, rb_entry);
774                 }
775         }
776         *entry = last;
777         return (FALSE);
778 }
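
#if 0
/*
 * Usage sketch (illustrative only; example_lookup() is hypothetical):
 * a failed lookup still returns, in *entry, the closest preceding
 * entry (or &map->header), which callers such as vm_map_insert() use
 * as the insertion point.
 */
static void
example_lookup(vm_map_t map, vm_offset_t addr)
{
        vm_map_entry_t entry;

        if (vm_map_lookup_entry(map, addr, &entry)) {
                /* addr lies within [entry->start, entry->end) */
        } else {
                /* entry immediately precedes addr, or is &map->header */
        }
}
#endif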
779
780 /*
781  *      vm_map_insert:
782  *
783  *      Inserts the given whole VM object into the target
784  *      map at the specified address range.  The object's
785  *      size should match that of the address range.
786  *
787  *      Requires that the map be locked, and leaves it so.  Requires that
788  *      sufficient vm_map_entry structures have been reserved and tracks
789  *      the use via countp.
790  *
791  *      If object is non-NULL, ref count must be bumped by caller
792  *      prior to making call to account for the new entry.
793  */
794 int
795 vm_map_insert(vm_map_t map, int *countp,
796               vm_object_t object, vm_ooffset_t offset,
797               vm_offset_t start, vm_offset_t end,
798               vm_maptype_t maptype,
799               vm_prot_t prot, vm_prot_t max,
800               int cow)
801 {
802         vm_map_entry_t new_entry;
803         vm_map_entry_t prev_entry;
804         vm_map_entry_t temp_entry;
805         vm_eflags_t protoeflags;
806
807         /*
808          * Check that the start and end points are not bogus.
809          */
810
811         if ((start < map->min_offset) || (end > map->max_offset) ||
812             (start >= end))
813                 return (KERN_INVALID_ADDRESS);
814
815         /*
816          * Find the entry prior to the proposed starting address; if it's part
817          * of an existing entry, this range is bogus.
818          */
819
820         if (vm_map_lookup_entry(map, start, &temp_entry))
821                 return (KERN_NO_SPACE);
822
823         prev_entry = temp_entry;
824
825         /*
826          * Assert that the next entry doesn't overlap the end point.
827          */
828
829         if ((prev_entry->next != &map->header) &&
830             (prev_entry->next->start < end))
831                 return (KERN_NO_SPACE);
832
833         protoeflags = 0;
834
835         if (cow & MAP_COPY_ON_WRITE)
836                 protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY;
837
838         if (cow & MAP_NOFAULT) {
839                 protoeflags |= MAP_ENTRY_NOFAULT;
840
841                 KASSERT(object == NULL,
842                         ("vm_map_insert: paradoxical MAP_NOFAULT request"));
843         }
844         if (cow & MAP_DISABLE_SYNCER)
845                 protoeflags |= MAP_ENTRY_NOSYNC;
846         if (cow & MAP_DISABLE_COREDUMP)
847                 protoeflags |= MAP_ENTRY_NOCOREDUMP;
848         if (cow & MAP_IS_STACK)
849                 protoeflags |= MAP_ENTRY_STACK;
850
851         if (object) {
852                 /*
853                  * When object is non-NULL, it could be shared with another
854                  * process.  We have to set or clear OBJ_ONEMAPPING 
855                  * appropriately.
856                  */
857                 if ((object->ref_count > 1) || (object->shadow_count != 0)) {
858                         vm_object_clear_flag(object, OBJ_ONEMAPPING);
859                 }
860         }
861         else if ((prev_entry != &map->header) &&
862                  (prev_entry->eflags == protoeflags) &&
863                  (prev_entry->end == start) &&
864                  (prev_entry->wired_count == 0) &&
865                  prev_entry->maptype == maptype &&
866                  ((prev_entry->object.vm_object == NULL) ||
867                   vm_object_coalesce(prev_entry->object.vm_object,
868                                      OFF_TO_IDX(prev_entry->offset),
869                                      (vm_size_t)(prev_entry->end - prev_entry->start),
870                                      (vm_size_t)(end - prev_entry->end)))) {
871                 /*
872                  * We were able to extend the object.  Determine if we
873                  * can extend the previous map entry to include the 
874                  * new range as well.
875                  */
876                 if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
877                     (prev_entry->protection == prot) &&
878                     (prev_entry->max_protection == max)) {
879                         map->size += (end - prev_entry->end);
880                         prev_entry->end = end;
881                         vm_map_simplify_entry(map, prev_entry, countp);
882                         return (KERN_SUCCESS);
883                 }
884
885                 /*
886                  * If we can extend the object but cannot extend the
887                  * map entry, we have to create a new map entry.  We
888                  * must bump the ref count on the extended object to
889                  * account for it.  object may be NULL.
890                  */
891                 object = prev_entry->object.vm_object;
892                 offset = prev_entry->offset +
893                         (prev_entry->end - prev_entry->start);
894                 vm_object_reference(object);
895         }
896
897         /*
898          * NOTE: if conditionals fail, object can be NULL here.  This occurs
899          * in things like the buffer map where we manage kva but do not manage
900          * backing objects.
901          */
902
903         /*
904          * Create a new entry
905          */
906
907         new_entry = vm_map_entry_create(map, countp);
908         new_entry->start = start;
909         new_entry->end = end;
910
911         new_entry->maptype = maptype;
912         new_entry->eflags = protoeflags;
913         new_entry->object.vm_object = object;
914         new_entry->offset = offset;
915         new_entry->aux.master_pde = 0;
916
917         new_entry->inheritance = VM_INHERIT_DEFAULT;
918         new_entry->protection = prot;
919         new_entry->max_protection = max;
920         new_entry->wired_count = 0;
921
922         /*
923          * Insert the new entry into the list
924          */
925
926         vm_map_entry_link(map, prev_entry, new_entry);
927         map->size += new_entry->end - new_entry->start;
928
929         /*
930          * Update the free space hint
931          */
932         if ((map->first_free == prev_entry) &&
933             (prev_entry->end >= new_entry->start)) {
934                 map->first_free = new_entry;
935         }
936
937 #if 0
938         /*
939          * Temporarily removed to avoid MAP_STACK panic, due to
940          * MAP_STACK being a huge hack.  Will be added back in
941          * when MAP_STACK (and the user stack mapping) is fixed.
942          */
943         /*
944          * It may be possible to simplify the entry
945          */
946         vm_map_simplify_entry(map, new_entry, countp);
947 #endif
948
949         /*
950          * Try to pre-populate the page table.  Mappings governed by virtual
951          * page tables cannot be prepopulated without a lot of work, so
952          * don't try.
953          */
954         if ((cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) &&
955             maptype != VM_MAPTYPE_VPAGETABLE) {
956                 pmap_object_init_pt(map->pmap, start, prot,
957                                     object, OFF_TO_IDX(offset), end - start,
958                                     cow & MAP_PREFAULT_PARTIAL);
959         }
960
961         return (KERN_SUCCESS);
962 }
963
964 /*
965  * Find sufficient space for `length' bytes in the given map, starting at
966  * `start'.  The map must be locked.  Returns 0 on success, 1 on no space.
967  *
968  * This function will return an arbitrarily aligned pointer.  If no
969  * particular alignment is required you should pass align as 1.  Note that
970  * the map may return PAGE_SIZE aligned pointers if all the lengths used in
971  * the map are a multiple of PAGE_SIZE, even if you pass a smaller align
972  * argument.
973  *
974  * 'align' should be a power of 2 but is not required to be.
975  */
976 int
977 vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length,
978                  vm_offset_t align, int flags, vm_offset_t *addr)
979 {
980         vm_map_entry_t entry, next;
981         vm_offset_t end;
982         vm_offset_t align_mask;
983
984         if (start < map->min_offset)
985                 start = map->min_offset;
986         if (start > map->max_offset)
987                 return (1);
988
989         /*
990          * If the alignment is not a power of 2 we will have to use
991          * a mod/division; set align_mask to a special value.
992          */
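        /*
         * Example: align == 8 gives (8 | 7) + 1 == 16 == (8 << 1), so the
         * cheap mask below is used; align == 6 gives (6 | 5) + 1 == 8 != 12
         * and forces the division path.
         */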
993         if ((align | (align - 1)) + 1 != (align << 1))
994                 align_mask = (vm_offset_t)-1;
995         else
996                 align_mask = align - 1;
997
998 retry:
999         /*
1000          * Look for the first possible address; if there's already something
1001          * at this address, we have to start after it.
1002          */
1003         if (start == map->min_offset) {
1004                 if ((entry = map->first_free) != &map->header)
1005                         start = entry->end;
1006         } else {
1007                 vm_map_entry_t tmp;
1008
1009                 if (vm_map_lookup_entry(map, start, &tmp))
1010                         start = tmp->end;
1011                 entry = tmp;
1012         }
1013
1014         /*
1015          * Look through the rest of the map, trying to fit a new region in the
1016          * gap between existing regions, or after the very last region.
1017          */
1018         for (;; start = (entry = next)->end) {
1019                 /*
1020                  * Adjust the proposed start by the requested alignment,
1021                  * be sure that we didn't wrap the address.
1022                  */
1023                 if (align_mask == (vm_offset_t)-1)
1024                         end = ((start + align - 1) / align) * align;
1025                 else
1026                         end = (start + align_mask) & ~align_mask;
1027                 if (end < start)
1028                         return (1);
1029                 start = end;
1030                 /*
1031                  * Find the end of the proposed new region.  Be sure we didn't
1032                  * go beyond the end of the map, or wrap around the address.
1033                  * Then check to see if this is the last entry or if the 
1034                  * proposed end fits in the gap between this and the next
1035                  * entry.
1036                  */
1037                 end = start + length;
1038                 if (end > map->max_offset || end < start)
1039                         return (1);
1040                 next = entry->next;
1041
1042                 /*
1043                  * If the next entry's start address is beyond the desired
1044                  * end address we may have found a good entry.
1045                  *
1046                  * If the next entry is a stack mapping we do not map into
1047                  * the stack's reserved space.
1048                  *
1049                  * XXX continue to allow mapping into the stack's reserved
1050                  * space if doing a MAP_STACK mapping inside a MAP_STACK
1051                  * mapping, for backwards compatibility.  But the caller
1052                  * really should use MAP_STACK | MAP_TRYFIXED if they
1053                  * want to do that.
1054                  */
1055                 if (next == &map->header)
1056                         break;
1057                 if (next->start >= end) {
1058                         if ((next->eflags & MAP_ENTRY_STACK) == 0)
1059                                 break;
1060                         if (flags & MAP_STACK)
1061                                 break;
1062                         if (next->start - next->aux.avail_ssize >= end)
1063                                 break;
1064                 }
1065         }
1066         map->hint = entry;
1067         if (map == &kernel_map) {
1068                 vm_offset_t ksize;
1069                 if ((ksize = round_page(start + length)) > kernel_vm_end) {
1070                         pmap_growkernel(ksize);
1071                         goto retry;
1072                 }
1073         }
1074         *addr = start;
1075         return (0);
1076 }
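
#if 0
/*
 * Usage sketch (illustrative only; example_findspace() is
 * hypothetical): find page-aligned space for a new mapping.  Passing
 * MAP_STACK in 'flags' allows the result to land in another stack's
 * reserved grow-area, for backwards compatibility only.
 */
static int
example_findspace(vm_map_t map, vm_size_t length, vm_offset_t *addrp)
{
        int error;

        vm_map_lock(map);
        error = vm_map_findspace(map, vm_map_min(map), length,
                                 PAGE_SIZE, 0, addrp);
        vm_map_unlock(map);
        return (error);
}
#endif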
1077
1078 /*
1079  *      vm_map_find finds an unallocated region in the target address
1080  *      map with the given length.  The search is defined to be
1081  *      first-fit from the specified address; the region found is
1082  *      returned in the same parameter.
1083  *
1084  *      If object is non-NULL, ref count must be bumped by caller
1085  *      prior to making call to account for the new entry.
1086  */
1087 int
1088 vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1089             vm_offset_t *addr,  vm_size_t length,
1090             boolean_t fitit,
1091             vm_maptype_t maptype,
1092             vm_prot_t prot, vm_prot_t max,
1093             int cow)
1094 {
1095         vm_offset_t start;
1096         int result;
1097         int count;
1098
1099         start = *addr;
1100
1101         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1102         vm_map_lock(map);
1103         if (fitit) {
1104                 if (vm_map_findspace(map, start, length, 1, 0, addr)) {
1105                         vm_map_unlock(map);
1106                         vm_map_entry_release(count);
1107                         return (KERN_NO_SPACE);
1108                 }
1109                 start = *addr;
1110         }
1111         result = vm_map_insert(map, &count, object, offset,
1112                                start, start + length,
1113                                maptype,
1114                                prot, max,
1115                                cow);
1116         vm_map_unlock(map);
1117         vm_map_entry_release(count);
1118
1119         return (result);
1120 }
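
#if 0
/*
 * Usage sketch (illustrative only; example_map_anon() is
 * hypothetical): establish an anonymous zero-fill mapping anywhere in
 * the map.  With a NULL object the backing object is allocated lazily;
 * a non-NULL object must have its ref count bumped by the caller.
 */
static int
example_map_anon(vm_map_t map, vm_size_t size, vm_offset_t *addrp)
{
        return (vm_map_find(map, NULL, 0, addrp, size, TRUE,
                            VM_MAPTYPE_NORMAL, VM_PROT_ALL, VM_PROT_ALL, 0));
}
#endif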
1121
1122 /*
1123  *      vm_map_simplify_entry:
1124  *
1125  *      Simplify the given map entry by merging with either neighbor.  This
1126  *      routine also has the ability to merge with both neighbors.
1127  *
1128  *      The map must be locked.
1129  *
1130  *      This routine guarantees that the passed entry remains valid (though
1131  *      possibly extended).  When merging, this routine may delete one or
1132  *      both neighbors.  No action is taken on entries which have their
1133  *      in-transition flag set.
1134  */
1135 void
1136 vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry, int *countp)
1137 {
1138         vm_map_entry_t next, prev;
1139         vm_size_t prevsize, esize;
1140
1141         if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
1142                 ++mycpu->gd_cnt.v_intrans_coll;
1143                 return;
1144         }
1145
1146         if (entry->maptype == VM_MAPTYPE_SUBMAP)
1147                 return;
1148
1149         prev = entry->prev;
1150         if (prev != &map->header) {
1151                 prevsize = prev->end - prev->start;
1152                 if ( (prev->end == entry->start) &&
1153                      (prev->maptype == entry->maptype) &&
1154                      (prev->object.vm_object == entry->object.vm_object) &&
1155                      (!prev->object.vm_object ||
1156                         (prev->offset + prevsize == entry->offset)) &&
1157                      (prev->eflags == entry->eflags) &&
1158                      (prev->protection == entry->protection) &&
1159                      (prev->max_protection == entry->max_protection) &&
1160                      (prev->inheritance == entry->inheritance) &&
1161                      (prev->wired_count == entry->wired_count)) {
1162                         if (map->first_free == prev)
1163                                 map->first_free = entry;
1164                         if (map->hint == prev)
1165                                 map->hint = entry;
1166                         vm_map_entry_unlink(map, prev);
1167                         entry->start = prev->start;
1168                         entry->offset = prev->offset;
1169                         if (prev->object.vm_object)
1170                                 vm_object_deallocate(prev->object.vm_object);
1171                         vm_map_entry_dispose(map, prev, countp);
1172                 }
1173         }
1174
1175         next = entry->next;
1176         if (next != &map->header) {
1177                 esize = entry->end - entry->start;
1178                 if ((entry->end == next->start) &&
1179                     (next->maptype == entry->maptype) &&
1180                     (next->object.vm_object == entry->object.vm_object) &&
1181                      (!entry->object.vm_object ||
1182                         (entry->offset + esize == next->offset)) &&
1183                     (next->eflags == entry->eflags) &&
1184                     (next->protection == entry->protection) &&
1185                     (next->max_protection == entry->max_protection) &&
1186                     (next->inheritance == entry->inheritance) &&
1187                     (next->wired_count == entry->wired_count)) {
1188                         if (map->first_free == next)
1189                                 map->first_free = entry;
1190                         if (map->hint == next)
1191                                 map->hint = entry;
1192                         vm_map_entry_unlink(map, next);
1193                         entry->end = next->end;
1194                         if (next->object.vm_object)
1195                                 vm_object_deallocate(next->object.vm_object);
1196                         vm_map_entry_dispose(map, next, countp);
1197                 }
1198         }
1199 }
1200 /*
1201  *      vm_map_clip_start:      [ internal use only ]
1202  *
1203  *      Asserts that the given entry begins at or after
1204  *      the specified address; if necessary,
1205  *      it splits the entry into two.
1206  */
1207 #define vm_map_clip_start(map, entry, startaddr, countp) \
1208 { \
1209         if (startaddr > entry->start) \
1210                 _vm_map_clip_start(map, entry, startaddr, countp); \
1211 }
1212
1213 /*
1214  *      This routine is called only when it is known that
1215  *      the entry must be split.
1216  */
1217 static void
1218 _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start, int *countp)
1219 {
1220         vm_map_entry_t new_entry;
1221
1222         /*
1223          * Split off the front portion -- note that we must insert the new
1224          * entry BEFORE this one, so that this entry has the specified
1225          * starting address.
1226          */
1227
1228         vm_map_simplify_entry(map, entry, countp);
1229
1230         /*
1231          * If there is no object backing this entry, we might as well create
1232          * one now.  If we defer it, an object can get created after the map
1233          * is clipped, and individual objects will be created for the split-up
1234          * map.  This is a bit of a hack, but is also about the best place to
1235          * put this improvement.
1236          */
1237         if (entry->object.vm_object == NULL && !map->system_map) {
1238                 vm_map_entry_allocate_object(entry);
1239         }
1240
1241         new_entry = vm_map_entry_create(map, countp);
1242         *new_entry = *entry;
1243
1244         new_entry->end = start;
1245         entry->offset += (start - entry->start);
1246         entry->start = start;
1247
1248         vm_map_entry_link(map, entry->prev, new_entry);
1249
1250         switch(entry->maptype) {
1251         case VM_MAPTYPE_NORMAL:
1252         case VM_MAPTYPE_VPAGETABLE:
1253                 vm_object_reference(new_entry->object.vm_object);
1254                 break;
1255         default:
1256                 break;
1257         }
1258 }
1259
1260 /*
1261  *      vm_map_clip_end:        [ internal use only ]
1262  *
1263  *      Asserts that the given entry ends at or before
1264  *      the specified address; if necessary,
1265  *      it splits the entry into two.
1266  */
1267
1268 #define vm_map_clip_end(map, entry, endaddr, countp) \
1269 { \
1270         if (endaddr < entry->end) \
1271                 _vm_map_clip_end(map, entry, endaddr, countp); \
1272 }
1273
1274 /*
1275  *      This routine is called only when it is known that
1276  *      the entry must be split.
1277  */
1278 static void
1279 _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end, int *countp)
1280 {
1281         vm_map_entry_t new_entry;
1282
1283         /*
1284          * If there is no object backing this entry, we might as well create
1285          * one now.  If we defer it, an object can get created after the map
1286          * is clipped, and individual objects will be created for the split-up
1287          * map.  This is a bit of a hack, but is also about the best place to
1288          * put this improvement.
1289          */
1290
1291         if (entry->object.vm_object == NULL && !map->system_map) {
1292                 vm_map_entry_allocate_object(entry);
1293         }
1294
1295         /*
1296          * Create a new entry and insert it AFTER the specified entry
1297          */
1298
1299         new_entry = vm_map_entry_create(map, countp);
1300         *new_entry = *entry;
1301
1302         new_entry->start = entry->end = end;
1303         new_entry->offset += (end - entry->start);
1304
1305         vm_map_entry_link(map, entry, new_entry);
1306
1307         switch(entry->maptype) {
1308         case VM_MAPTYPE_NORMAL:
1309         case VM_MAPTYPE_VPAGETABLE:
1310                 vm_object_reference(new_entry->object.vm_object);
1311                 break;
1312         default:
1313                 break;
1314         }
1315 }
1316
1317 /*
1318  *      VM_MAP_RANGE_CHECK:     [ internal use only ]
1319  *
1320  *      Asserts that the starting and ending region
1321  *      addresses fall within the valid range of the map.
1322  */
1323 #define VM_MAP_RANGE_CHECK(map, start, end)             \
1324                 {                                       \
1325                 if (start < vm_map_min(map))            \
1326                         start = vm_map_min(map);        \
1327                 if (end > vm_map_max(map))              \
1328                         end = vm_map_max(map);          \
1329                 if (start > end)                        \
1330                         start = end;                    \
1331                 }
1332
1333 /*
1334  *      vm_map_transition_wait: [ kernel use only ]
1335  *
1336  *      Used to block when an in-transition collision occurs.  The map
1337  *      is unlocked for the sleep and relocked before the return.
1338  */
1339 static
1340 void
1341 vm_map_transition_wait(vm_map_t map)
1342 {
1343         vm_map_unlock(map);
1344         tsleep(map, 0, "vment", 0);
1345         vm_map_lock(map);
1346 }
1347
1348 /*
1349  * CLIP_CHECK_BACK
1350  * CLIP_CHECK_FWD
1351  *
1352  *      When we do blocking operations with the map lock held it is
1353  *      possible that a clip might have occurred on our in-transit entry,
1354  *      requiring an adjustment to the entry in our loop.  These macros
1355  *      help the pageable and clip_range code deal with the case.  The
1356  *      conditional costs virtually nothing if no clipping has occurred.
1357  */
1358
1359 #define CLIP_CHECK_BACK(entry, save_start)              \
1360     do {                                                \
1361             while (entry->start != save_start) {        \
1362                     entry = entry->prev;                \
1363                     KASSERT(entry != &map->header, ("bad entry clip")); \
1364             }                                           \
1365     } while(0)
1366
1367 #define CLIP_CHECK_FWD(entry, save_end)                 \
1368     do {                                                \
1369             while (entry->end != save_end) {            \
1370                     entry = entry->next;                \
1371                     KASSERT(entry != &map->header, ("bad entry clip")); \
1372             }                                           \
1373     } while(0)
1374
1375
1376 /*
1377  *      vm_map_clip_range:      [ kernel use only ]
1378  *
1379  *      Clip the specified range and return the base entry.  The
1380  *      range may cover several entries starting at the returned base
1381  *      and the first and last entry in the covering sequence will be
1382  *      properly clipped to the requested start and end address.
1383  *
1384  *      If no holes are allowed you should pass the MAP_CLIP_NO_HOLES
1385  *      flag.  
1386  *
1387  *      The MAP_ENTRY_IN_TRANSITION flag will be set for the entries
1388  *      covered by the requested range.
1389  *
1390  *      The map must be exclusively locked on entry and will remain locked
1391  *      on return. If no range exists or the range contains holes and you
1392  *      specified that no holes were allowed, NULL will be returned.  This
1393  *      routine may temporarily unlock the map in order to avoid a deadlock
1394  *      sleeping.
1395  */
1396 static
1397 vm_map_entry_t
1398 vm_map_clip_range(vm_map_t map, vm_offset_t start, vm_offset_t end, 
1399         int *countp, int flags)
1400 {
1401         vm_map_entry_t start_entry;
1402         vm_map_entry_t entry;
1403
1404         /*
1405          * Locate the entry and effect initial clipping.  The in-transition
1406          * case does not occur very often so do not try to optimize it.
1407          */
1408 again:
1409         if (vm_map_lookup_entry(map, start, &start_entry) == FALSE)
1410                 return (NULL);
1411         entry = start_entry;
1412         if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
1413                 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
1414                 ++mycpu->gd_cnt.v_intrans_coll;
1415                 ++mycpu->gd_cnt.v_intrans_wait;
1416                 vm_map_transition_wait(map);
1417                 /*
1418                  * entry and/or start_entry may have been clipped while
1419                  * we slept, or may have gone away entirely.  We have
1420                  * to restart from the lookup.
1421                  */
1422                 goto again;
1423         }
1424         /*
1425          * Since we hold an exclusive map lock we do not have to restart
1426          * after clipping, even though clipping may block in zalloc.
1427          */
1428         vm_map_clip_start(map, entry, start, countp);
1429         vm_map_clip_end(map, entry, end, countp);
1430         entry->eflags |= MAP_ENTRY_IN_TRANSITION;
1431
1432         /*
1433          * Scan entries covered by the range.  When working on the next
1434          * entry a restart need only re-loop on the current entry which
1435          * we have already locked, since 'next' may have changed.  Also,
1436          * even though entry is safe, it may have been clipped so we
1437          * have to iterate forwards through the clip after sleeping.
1438          */
1439         while (entry->next != &map->header && entry->next->start < end) {
1440                 vm_map_entry_t next = entry->next;
1441
1442                 if (flags & MAP_CLIP_NO_HOLES) {
1443                         if (next->start > entry->end) {
1444                                 vm_map_unclip_range(map, start_entry,
1445                                         start, entry->end, countp, flags);
1446                                 return(NULL);
1447                         }
1448                 }
1449
1450                 if (next->eflags & MAP_ENTRY_IN_TRANSITION) {
1451                         vm_offset_t save_end = entry->end;
1452                         next->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
1453                         ++mycpu->gd_cnt.v_intrans_coll;
1454                         ++mycpu->gd_cnt.v_intrans_wait;
1455                         vm_map_transition_wait(map);
1456
1457                         /*
1458                          * clips might have occurred while we blocked.
1459                          */
1460                         CLIP_CHECK_FWD(entry, save_end);
1461                         CLIP_CHECK_BACK(start_entry, start);
1462                         continue;
1463                 }
1464                 /*
1465                  * No restart necessary even though clip_end may block, we
1466                  * are holding the map lock.
1467                  */
1468                 vm_map_clip_end(map, next, end, countp);
1469                 next->eflags |= MAP_ENTRY_IN_TRANSITION;
1470                 entry = next;
1471         }
1472         if (flags & MAP_CLIP_NO_HOLES) {
1473                 if (entry->end != end) {
1474                         vm_map_unclip_range(map, start_entry,
1475                                 start, entry->end, countp, flags);
1476                         return(NULL);
1477                 }
1478         }
1479         return(start_entry);
1480 }
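
/*
 * Illustrative sketch, not compiled: the typical pairing of
 * vm_map_clip_range() and vm_map_unclip_range(), modeled on the wiring
 * code later in this file.  'map', 'start', and 'end' are assumed to
 * be supplied by the caller.
 *
 *      count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
 *      vm_map_lock(map);
 *      start_entry = vm_map_clip_range(map, start, end, &count,
 *                                      MAP_CLIP_NO_HOLES);
 *      if (start_entry != NULL) {
 *              (operate on the clipped entries, which are flagged
 *               MAP_ENTRY_IN_TRANSITION on our behalf)
 *              vm_map_unclip_range(map, start_entry, start, end,
 *                                  &count, MAP_CLIP_NO_HOLES);
 *      }
 *      vm_map_unlock(map);
 *      vm_map_entry_release(count);
 */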
1481
1482 /*
1483  *      vm_map_unclip_range:    [ kernel use only ]
1484  *
1485  *      Undo the effect of vm_map_clip_range().  You should pass the same
1486  *      flags and the same range that you passed to vm_map_clip_range().
1487  *      This code will clear the in-transition flag on the entries and
1488  *      wake up anyone waiting.  This code will also simplify the sequence 
1489  *      and attempt to merge it with entries before and after the sequence.
1490  *
1491  *      The map must be locked on entry and will remain locked on return.
1492  *
1493  *      Note that you should also pass the start_entry returned by 
1494  *      vm_map_clip_range().  However, if you block between the two calls
1495  *      with the map unlocked please be aware that the start_entry may
1496  *      have been clipped and you may need to scan it backwards to find
1497  *      the entry corresponding to the original start address.  You are
1498  *      responsible for this; vm_map_unclip_range() expects the correct
1499  *      start_entry to be passed to it and will KASSERT otherwise.
1500  */
1501 static
1502 void
1503 vm_map_unclip_range(
1504         vm_map_t map,
1505         vm_map_entry_t start_entry,
1506         vm_offset_t start,
1507         vm_offset_t end,
1508         int *countp,
1509         int flags)
1510 {
1511         vm_map_entry_t entry;
1512
1513         entry = start_entry;
1514
1515         KASSERT(entry->start == start, ("unclip_range: illegal base entry"));
1516         while (entry != &map->header && entry->start < end) {
1517                 KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION, ("in-transition flag not set during unclip on: %p", entry));
1518                 KASSERT(entry->end <= end, ("unclip_range: tail wasn't clipped"));
1519                 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
1520                 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
1521                         entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
1522                         wakeup(map);
1523                 }
1524                 entry = entry->next;
1525         }
1526
1527         /*
1528          * Simplification does not block so there is no restart case.
1529          */
1530         entry = start_entry;
1531         while (entry != &map->header && entry->start < end) {
1532                 vm_map_simplify_entry(map, entry, countp);
1533                 entry = entry->next;
1534         }
1535 }
1536
1537 /*
1538  *      vm_map_submap:          [ kernel use only ]
1539  *
1540  *      Mark the given range as handled by a subordinate map.
1541  *
1542  *      This range must have been created with vm_map_find,
1543  *      and no other operations may have been performed on this
1544  *      range prior to calling vm_map_submap.
1545  *
1546  *      Only a limited number of operations can be performed
1547  *      within this range after calling vm_map_submap:
1548  *              vm_fault
1549  *      [Don't try vm_map_copy!]
1550  *
1551  *      To remove a submapping, one must first remove the
1552  *      range from the superior map, and then destroy the
1553  *      submap (if desired).  [Better yet, don't try it.]
1554  */
1555 int
1556 vm_map_submap(vm_map_t map, vm_offset_t start, vm_offset_t end, vm_map_t submap)
1557 {
1558         vm_map_entry_t entry;
1559         int result = KERN_INVALID_ARGUMENT;
1560         int count;
1561
1562         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1563         vm_map_lock(map);
1564
1565         VM_MAP_RANGE_CHECK(map, start, end);
1566
1567         if (vm_map_lookup_entry(map, start, &entry)) {
1568                 vm_map_clip_start(map, entry, start, &count);
1569         } else {
1570                 entry = entry->next;
1571         }
1572
1573         vm_map_clip_end(map, entry, end, &count);
1574
1575         if ((entry->start == start) && (entry->end == end) &&
1576             ((entry->eflags & MAP_ENTRY_COW) == 0) &&
1577             (entry->object.vm_object == NULL)) {
1578                 entry->object.sub_map = submap;
1579                 entry->maptype = VM_MAPTYPE_SUBMAP;
1580                 result = KERN_SUCCESS;
1581         }
1582         vm_map_unlock(map);
1583         vm_map_entry_release(count);
1584
1585         return (result);
1586 }
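
/*
 * Illustrative sketch, not compiled: installing a submap.  'map',
 * 'start', 'end', and 'submap' are assumed to be set up by the caller,
 * with [start, end) previously created via vm_map_find() and otherwise
 * untouched, as required above.
 *
 *      if (vm_map_submap(map, start, end, submap) != KERN_SUCCESS)
 *              panic("vm_map_submap failed");
 */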
1587
1588 /*
1589  * vm_map_protect:
1590  *
1591  * Sets the protection of the specified address region in the target map. 
1592  * If "set_max" is specified, the maximum protection is to be set;
1593  * otherwise, only the current protection is affected.
1594  *
1595  * The protection is not applicable to submaps, but is applicable to normal
1596  * maps and maps governed by virtual page tables.  For example, when operating
1597  * on a virtual page table, our protection basically controls how COW occurs
1598  * on the backing object, whereas the virtual page table itself is an
1599  * abstraction exported to userland.
1600  */
1601 int
1602 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
1603                vm_prot_t new_prot, boolean_t set_max)
1604 {
1605         vm_map_entry_t current;
1606         vm_map_entry_t entry;
1607         int count;
1608
1609         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1610         vm_map_lock(map);
1611
1612         VM_MAP_RANGE_CHECK(map, start, end);
1613
1614         if (vm_map_lookup_entry(map, start, &entry)) {
1615                 vm_map_clip_start(map, entry, start, &count);
1616         } else {
1617                 entry = entry->next;
1618         }
1619
1620         /*
1621          * Make a first pass to check for protection violations.
1622          */
1623         current = entry;
1624         while ((current != &map->header) && (current->start < end)) {
1625                 if (current->maptype == VM_MAPTYPE_SUBMAP) {
1626                         vm_map_unlock(map);
1627                         vm_map_entry_release(count);
1628                         return (KERN_INVALID_ARGUMENT);
1629                 }
1630                 if ((new_prot & current->max_protection) != new_prot) {
1631                         vm_map_unlock(map);
1632                         vm_map_entry_release(count);
1633                         return (KERN_PROTECTION_FAILURE);
1634                 }
1635                 current = current->next;
1636         }
1637
1638         /*
1639          * Go back and fix up protections. [Note that clipping is not
1640          * necessary the second time.]
1641          */
1642         current = entry;
1643
1644         while ((current != &map->header) && (current->start < end)) {
1645                 vm_prot_t old_prot;
1646
1647                 vm_map_clip_end(map, current, end, &count);
1648
1649                 old_prot = current->protection;
1650                 if (set_max) {
1651                         current->protection =
1652                             (current->max_protection = new_prot) &
1653                             old_prot;
1654                 } else {
1655                         current->protection = new_prot;
1656                 }
1657
1658                 /*
1659                  * Update physical map if necessary. Worry about copy-on-write
1660                  * here -- CHECK THIS XXX
1661                  */
1662
1663                 if (current->protection != old_prot) {
1664 #define MASK(entry)     (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
1665                                                         VM_PROT_ALL)
1666
1667                         pmap_protect(map->pmap, current->start,
1668                             current->end,
1669                             current->protection & MASK(current));
1670 #undef  MASK
1671                 }
1672
1673                 vm_map_simplify_entry(map, current, &count);
1674
1675                 current = current->next;
1676         }
1677
1678         vm_map_unlock(map);
1679         vm_map_entry_release(count);
1680         return (KERN_SUCCESS);
1681 }
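
/*
 * Illustrative sketch, not compiled: write-protecting a range.  With
 * set_max FALSE only the current protection changes, and the new
 * protection must be a subset of each entry's max_protection or
 * KERN_PROTECTION_FAILURE is returned.
 *
 *      rv = vm_map_protect(map, start, end, VM_PROT_READ, FALSE);
 */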
1682
1683 /*
1684  *      vm_map_madvise:
1685  *
1686  *      This routine traverses a process's map handling the madvise
1687  *      system call.  Advisories are classified as either those affecting
1688  *      the vm_map_entry structure or those affecting the underlying
1689  *      objects.
1690  *
1691  *      The <value> argument is used for extended madvise calls.
1692  */
1693 int
1694 vm_map_madvise(vm_map_t map, vm_offset_t start, vm_offset_t end,
1695                int behav, off_t value)
1696 {
1697         vm_map_entry_t current, entry;
1698         int modify_map = 0;
1699         int error = 0;
1700         int count;
1701
1702         /*
1703          * Some madvise calls directly modify the vm_map_entry, in which case
1704          * we need to use an exclusive lock on the map and we need to perform 
1705          * various clipping operations.  Otherwise we only need a read-lock
1706          * on the map.
1707          */
1708
1709         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1710
1711         switch(behav) {
1712         case MADV_NORMAL:
1713         case MADV_SEQUENTIAL:
1714         case MADV_RANDOM:
1715         case MADV_NOSYNC:
1716         case MADV_AUTOSYNC:
1717         case MADV_NOCORE:
1718         case MADV_CORE:
1719         case MADV_SETMAP:
1720         case MADV_INVAL:
1721                 modify_map = 1;
1722                 vm_map_lock(map);
1723                 break;
1724         case MADV_WILLNEED:
1725         case MADV_DONTNEED:
1726         case MADV_FREE:
1727                 vm_map_lock_read(map);
1728                 break;
1729         default:
1730                 vm_map_entry_release(count);
1731                 return (EINVAL);
1732         }
1733
1734         /*
1735          * Locate starting entry and clip if necessary.
1736          */
1737
1738         VM_MAP_RANGE_CHECK(map, start, end);
1739
1740         if (vm_map_lookup_entry(map, start, &entry)) {
1741                 if (modify_map)
1742                         vm_map_clip_start(map, entry, start, &count);
1743         } else {
1744                 entry = entry->next;
1745         }
1746
1747         if (modify_map) {
1748                 /*
1749                  * madvise behaviors that are implemented in the vm_map_entry.
1750                  *
1751                  * We clip the vm_map_entry so that behavioral changes are
1752                  * limited to the specified address range.
1753                  */
1754                 for (current = entry;
1755                      (current != &map->header) && (current->start < end);
1756                      current = current->next
1757                 ) {
1758                         if (current->maptype == VM_MAPTYPE_SUBMAP)
1759                                 continue;
1760
1761                         vm_map_clip_end(map, current, end, &count);
1762
1763                         switch (behav) {
1764                         case MADV_NORMAL:
1765                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
1766                                 break;
1767                         case MADV_SEQUENTIAL:
1768                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
1769                                 break;
1770                         case MADV_RANDOM:
1771                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
1772                                 break;
1773                         case MADV_NOSYNC:
1774                                 current->eflags |= MAP_ENTRY_NOSYNC;
1775                                 break;
1776                         case MADV_AUTOSYNC:
1777                                 current->eflags &= ~MAP_ENTRY_NOSYNC;
1778                                 break;
1779                         case MADV_NOCORE:
1780                                 current->eflags |= MAP_ENTRY_NOCOREDUMP;
1781                                 break;
1782                         case MADV_CORE:
1783                                 current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
1784                                 break;
1785                         case MADV_INVAL:
1786                                 /*
1787                                  * Invalidate the related pmap entries, used
1788                                  * to flush portions of the real kernel's
1789                                  * pmap when the caller has removed or
1790                                  * modified existing mappings in a virtual
1791                                  * page table.
1792                                  */
1793                                 pmap_remove(map->pmap,
1794                                             current->start, current->end);
1795                                 break;
1796                         case MADV_SETMAP:
1797                                 /*
1798                                  * Set the page directory page for a map
1799                                  * governed by a virtual page table.  Mark
1800                                  * the entry as being governed by a virtual
1801                                  * page table if it is not.
1802                                  *
1803                                  * XXX the page directory page is stored
1804                                  * in the avail_ssize field of the map_entry.
1805                                  *
1806                                  * XXX the map simplification code does not
1807                                  * compare this field so weird things may
1808                                  * happen if you do not apply this function
1809                                  * to the entire mapping governed by the
1810                                  * virtual page table.
1811                                  */
1812                                 if (current->maptype != VM_MAPTYPE_VPAGETABLE) {
1813                                         error = EINVAL;
1814                                         break;
1815                                 }
1816                                 current->aux.master_pde = value;
1817                                 pmap_remove(map->pmap,
1818                                             current->start, current->end);
1819                                 break;
1820                         default:
1821                                 error = EINVAL;
1822                                 break;
1823                         }
1824                         vm_map_simplify_entry(map, current, &count);
1825                 }
1826                 vm_map_unlock(map);
1827         } else {
1828                 vm_pindex_t pindex;
1829                 int count;
1830
1831                 /*
1832                  * madvise behaviors that are implemented in the underlying
1833                  * vm_object.
1834                  *
1835                  * Since we don't clip the vm_map_entry, we have to clip
1836                  * the vm_object pindex and count.
1837                  *
1838                  * NOTE!  We currently do not support these functions on
1839                  * virtual page tables.
1840                  */
1841                 for (current = entry;
1842                      (current != &map->header) && (current->start < end);
1843                      current = current->next
1844                 ) {
1845                         vm_offset_t useStart;
1846
1847                         if (current->maptype != VM_MAPTYPE_NORMAL)
1848                                 continue;
1849
1850                         pindex = OFF_TO_IDX(current->offset);
1851                         count = atop(current->end - current->start);
1852                         useStart = current->start;
1853
1854                         if (current->start < start) {
1855                                 pindex += atop(start - current->start);
1856                                 count -= atop(start - current->start);
1857                                 useStart = start;
1858                         }
1859                         if (current->end > end)
1860                                 count -= atop(current->end - end);
1861
1862                         if (count <= 0)
1863                                 continue;
1864
1865                         vm_object_madvise(current->object.vm_object,
1866                                           pindex, count, behav);
1867
1868                         /*
1869                          * Try to populate the page table.  Mappings governed
1870                          * by virtual page tables cannot be pre-populated
1871                          * without a lot of work so don't try.
1872                          */
1873                         if (behav == MADV_WILLNEED &&
1874                             current->maptype != VM_MAPTYPE_VPAGETABLE) {
1875                                 pmap_object_init_pt(
1876                                     map->pmap, 
1877                                     useStart,
1878                                     current->protection,
1879                                     current->object.vm_object,
1880                                     pindex, 
1881                                     (count << PAGE_SHIFT),
1882                                     MAP_PREFAULT_MADVISE
1883                                 );
1884                         }
1885                 }
1886                 vm_map_unlock_read(map);
1887         }
1888         vm_map_entry_release(count);
1889         return(error);
1890 }       
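
/*
 * Illustrative sketch, not compiled: MADV_WILLNEED only takes the
 * read-lock path and pre-populates the pmap, while MADV_SETMAP takes
 * the exclusive path and installs 'pde' (a caller-supplied page
 * directory page value) for a virtual page table mapping.
 *
 *      error = vm_map_madvise(map, start, end, MADV_WILLNEED, 0);
 *      error = vm_map_madvise(map, start, end, MADV_SETMAP, pde);
 */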
1891
1892
1893 /*
1894  *      vm_map_inherit:
1895  *
1896  *      Sets the inheritance of the specified address
1897  *      range in the target map.  Inheritance
1898  *      affects how the map will be shared with
1899  *      child maps at the time of vm_map_fork.
1900  */
1901 int
1902 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
1903                vm_inherit_t new_inheritance)
1904 {
1905         vm_map_entry_t entry;
1906         vm_map_entry_t temp_entry;
1907         int count;
1908
1909         switch (new_inheritance) {
1910         case VM_INHERIT_NONE:
1911         case VM_INHERIT_COPY:
1912         case VM_INHERIT_SHARE:
1913                 break;
1914         default:
1915                 return (KERN_INVALID_ARGUMENT);
1916         }
1917
1918         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1919         vm_map_lock(map);
1920
1921         VM_MAP_RANGE_CHECK(map, start, end);
1922
1923         if (vm_map_lookup_entry(map, start, &temp_entry)) {
1924                 entry = temp_entry;
1925                 vm_map_clip_start(map, entry, start, &count);
1926         } else
1927                 entry = temp_entry->next;
1928
1929         while ((entry != &map->header) && (entry->start < end)) {
1930                 vm_map_clip_end(map, entry, end, &count);
1931
1932                 entry->inheritance = new_inheritance;
1933
1934                 vm_map_simplify_entry(map, entry, &count);
1935
1936                 entry = entry->next;
1937         }
1938         vm_map_unlock(map);
1939         vm_map_entry_release(count);
1940         return (KERN_SUCCESS);
1941 }
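
/*
 * Illustrative sketch, not compiled: requesting that a range be shared
 * with children created at vm_map_fork time.
 *
 *      rv = vm_map_inherit(map, start, end, VM_INHERIT_SHARE);
 */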
1942
1943 /*
1944  * Implement the semantics of mlock and munlock (user wiring and unwiring).
1945  */
1946 int
1947 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t real_end,
1948     boolean_t new_pageable)
1949 {
1950         vm_map_entry_t entry;
1951         vm_map_entry_t start_entry;
1952         vm_offset_t end;
1953         int rv = KERN_SUCCESS;
1954         int count;
1955
1956         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1957         vm_map_lock(map);
1958         VM_MAP_RANGE_CHECK(map, start, real_end);
1959         end = real_end;
1960
1961         start_entry = vm_map_clip_range(map, start, end, &count, MAP_CLIP_NO_HOLES);
1962         if (start_entry == NULL) {
1963                 vm_map_unlock(map);
1964                 vm_map_entry_release(count);
1965                 return (KERN_INVALID_ADDRESS);
1966         }
1967
1968         if (new_pageable == 0) {
1969                 entry = start_entry;
1970                 while ((entry != &map->header) && (entry->start < end)) {
1971                         vm_offset_t save_start;
1972                         vm_offset_t save_end;
1973
1974                         /*
1975                          * Already user wired or hard wired (trivial cases)
1976                          */
1977                         if (entry->eflags & MAP_ENTRY_USER_WIRED) {
1978                                 entry = entry->next;
1979                                 continue;
1980                         }
1981                         if (entry->wired_count != 0) {
1982                                 entry->wired_count++;
1983                                 entry->eflags |= MAP_ENTRY_USER_WIRED;
1984                                 entry = entry->next;
1985                                 continue;
1986                         }
1987
1988                         /*
1989                          * A new wiring requires instantiation of appropriate
1990                          * management structures and the faulting in of the
1991                          * page.
1992                          */
1993                         if (entry->maptype != VM_MAPTYPE_SUBMAP) {
1994                                 int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
1995                                 if (copyflag && ((entry->protection & VM_PROT_WRITE) != 0)) {
1996                                         vm_map_entry_shadow(entry);
1997                                 } else if (entry->object.vm_object == NULL &&
1998                                            !map->system_map) {
1999                                         vm_map_entry_allocate_object(entry);
2000                                 }
2001                         }
2002                         entry->wired_count++;
2003                         entry->eflags |= MAP_ENTRY_USER_WIRED;
2004
2005                         /*
2006                          * Now fault in the area.  Note that vm_fault_wire()
2007                          * may release the map lock temporarily, it will be
2008                          * relocked on return.  The in-transition
2009                          * flag protects the entries. 
2010                          */
2011                         save_start = entry->start;
2012                         save_end = entry->end;
2013                         rv = vm_fault_wire(map, entry, TRUE);
2014                         if (rv) {
2015                                 CLIP_CHECK_BACK(entry, save_start);
2016                                 for (;;) {
2017                                         KASSERT(entry->wired_count == 1, ("bad wired_count on entry"));
2018                                         entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2019                                         entry->wired_count = 0;
2020                                         if (entry->end == save_end)
2021                                                 break;
2022                                         entry = entry->next;
2023                                         KASSERT(entry != &map->header, ("bad entry clip during backout"));
2024                                 }
2025                                 end = save_start;       /* unwire the rest */
2026                                 break;
2027                         }
2028                         /*
2029                          * note that even though the entry might have been
2030                          * clipped, the USER_WIRED flag we set prevents
2031                          * duplication so we do not have to do a 
2032                          * clip check.
2033                          */
2034                         entry = entry->next;
2035                 }
2036
2037                 /*
2038                  * If we failed fall through to the unwiring section to
2039                  * unwire what we had wired so far.  'end' has already
2040                  * been adjusted.
2041                  */
2042                 if (rv)
2043                         new_pageable = 1;
2044
2045                 /*
2046                  * start_entry might have been clipped if we unlocked the
2047                  * map and blocked.  No matter how clipped it has gotten
2048                  * there should be a fragment that is on our start boundary.
2049                  */
2050                 CLIP_CHECK_BACK(start_entry, start);
2051         }
2052
2053         /*
2054          * Deal with the unwiring case.
2055          */
2056         if (new_pageable) {
2057                 /*
2058                  * This is the unwiring case.  We must first ensure that the
2059                  * range to be unwired is really wired down.  We know there
2060                  * are no holes.
2061                  */
2062                 entry = start_entry;
2063                 while ((entry != &map->header) && (entry->start < end)) {
2064                         if ((entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
2065                                 rv = KERN_INVALID_ARGUMENT;
2066                                 goto done;
2067                         }
2068                         KASSERT(entry->wired_count != 0, ("wired count was 0 with USER_WIRED set! %p", entry));
2069                         entry = entry->next;
2070                 }
2071
2072                 /*
2073                  * Now decrement the wiring count for each region. If a region
2074                  * becomes completely unwired, unwire its physical pages and
2075                  * mappings.
2076                  */
2077                 /*
2078                  * Historical note: an earlier version of this code
2079                  * failed to reset the "entry" loop variable to
2080                  * start_entry before entering the loop below, picking
2081                  * up the value left over from the first loop instead.
2082                  * The second loop was then never entered and the pages
2083                  * backing the entries were never unwired, leaking
2084                  * wired pages.  The explicit reset below avoids that
2085                  * leak.
2086                  */
2087                 entry = start_entry;
2088                 while ((entry != &map->header) && (entry->start < end)) {
2089                         KASSERT(entry->eflags & MAP_ENTRY_USER_WIRED,
2090                                 ("expected USER_WIRED on entry %p", entry));
2091                         entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2092                         entry->wired_count--;
2093                         if (entry->wired_count == 0)
2094                                 vm_fault_unwire(map, entry);
2095                         entry = entry->next;
2096                 }
2097         }
2098 done:
2099         vm_map_unclip_range(map, start_entry, start, real_end, &count,
2100                 MAP_CLIP_NO_HOLES);
2101         map->timestamp++;
2102         vm_map_unlock(map);
2103         vm_map_entry_release(count);
2104         return (rv);
2105 }
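
/*
 * Illustrative sketch, not compiled: mlock(2)-style wiring of a user
 * range presumably reduces to a call of this form, with TRUE instead
 * of FALSE giving the munlock-style unwiring path.
 *
 *      rv = vm_map_unwire(map, addr, addr + size, FALSE);
 */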
2106
2107 /*
2108  *      vm_map_wire:
2109  *
2110  *      Sets the pageability of the specified address
2111  *      range in the target map.  Regions specified
2112  *      as not pageable require locked-down physical
2113  *      memory and physical page maps.
2114  *
2115  *      The map must not be locked, but a reference
2116  *      must remain to the map throughout the call.
2117  *
2118  *      This function may be called via the zalloc path and must properly
2119  *      reserve map entries for kernel_map.
2120  */
2121 int
2122 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t real_end, int kmflags)
2123 {
2124         vm_map_entry_t entry;
2125         vm_map_entry_t start_entry;
2126         vm_offset_t end;
2127         int rv = KERN_SUCCESS;
2128         int count;
2129
2130         if (kmflags & KM_KRESERVE)
2131                 count = vm_map_entry_kreserve(MAP_RESERVE_COUNT);
2132         else
2133                 count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
2134         vm_map_lock(map);
2135         VM_MAP_RANGE_CHECK(map, start, real_end);
2136         end = real_end;
2137
2138         start_entry = vm_map_clip_range(map, start, end, &count, MAP_CLIP_NO_HOLES);
2139         if (start_entry == NULL) {
2140                 vm_map_unlock(map);
2141                 rv = KERN_INVALID_ADDRESS;
2142                 goto failure;
2143         }
2144         if ((kmflags & KM_PAGEABLE) == 0) {
2145                 /*
2146                  * Wiring.  
2147                  *
2148                  * 1.  Holding the write lock, we create any shadow or zero-fill
2149                  * objects that need to be created. Then we clip each map
2150                  * entry to the region to be wired and increment its wiring
2151                  * count.  We create objects before clipping the map entries
2152                  * to avoid object proliferation.
2153                  *
2154                  * 2.  We downgrade to a read lock, and call vm_fault_wire to
2155                  * fault in the pages for any newly wired area (wired_count is
2156                  * 1).
2157                  *
2158                  * Downgrading to a read lock for vm_fault_wire avoids a 
2159                  * possible deadlock with another process that may have faulted
2160                  * on one of the pages to be wired (it would mark the page busy,
2161                  * blocking us, then in turn block on the map lock that we
2162                  * hold).  Because of problems in the recursive lock package,
2163                  * we cannot upgrade to a write lock in vm_map_lookup.  Thus,
2164                  * any actions that require the write lock must be done
2165                  * beforehand.  Because we keep the read lock on the map, the
2166                  * copy-on-write status of the entries we modify here cannot
2167                  * change.
2168                  */
2169
2170                 entry = start_entry;
2171                 while ((entry != &map->header) && (entry->start < end)) {
2172                         /*
2173                          * Trivial case if the entry is already wired
2174                          */
2175                         if (entry->wired_count) {
2176                                 entry->wired_count++;
2177                                 entry = entry->next;
2178                                 continue;
2179                         }
2180
2181                         /*
2182                          * The entry is being newly wired, we have to setup
2183                          * appropriate management structures.  A shadow 
2184                          * object is required for a copy-on-write region,
2185                          * or a normal object for a zero-fill region.  We
2186                          * do not have to do this for entries that point to sub
2187                          * maps because we won't hold the lock on the sub map.
2188                          */
2189                         if (entry->maptype != VM_MAPTYPE_SUBMAP) {
2190                                 int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
2191                                 if (copyflag &&
2192                                     ((entry->protection & VM_PROT_WRITE) != 0)) {
2193                                         vm_map_entry_shadow(entry);
2194                                 } else if (entry->object.vm_object == NULL &&
2195                                            !map->system_map) {
2196                                         vm_map_entry_allocate_object(entry);
2197                                 }
2198                         }
2199
2200                         entry->wired_count++;
2201                         entry = entry->next;
2202                 }
2203
2204                 /*
2205                  * Pass 2.
2206                  */
2207
2208                 /*
2209                  * HACK HACK HACK HACK
2210                  *
2211                  * Unlock the map to avoid deadlocks.  The in-transit flag
2212                  * protects us from most changes but note that
2213                  * clipping may still occur.  To prevent clipping from
2214                  * occurring after the unlock, except for when we are
2215                  * blocking in vm_fault_wire, we must run in a critical
2216                  * section, otherwise our accesses to entry->start and 
2217                  * entry->end could be corrupted.  We have to enter the
2218                  * critical section prior to unlocking so start_entry does
2219                  * not change out from under us at the very beginning of the
2220                  * loop.
2221                  *
2222                  * HACK HACK HACK HACK
2223                  */
2224
2225                 crit_enter();
2226
2227                 entry = start_entry;
2228                 while (entry != &map->header && entry->start < end) {
2229                         /*
2230                          * If vm_fault_wire fails for any page we need to undo
2231                          * what has been done.  We decrement the wiring count
2232                          * for those pages which have not yet been wired (now)
2233                          * and unwire those that have (later).
2234                          */
2235                         vm_offset_t save_start = entry->start;
2236                         vm_offset_t save_end = entry->end;
2237
2238                         if (entry->wired_count == 1)
2239                                 rv = vm_fault_wire(map, entry, FALSE);
2240                         if (rv) {
2241                                 CLIP_CHECK_BACK(entry, save_start);
2242                                 for (;;) {
2243                                         KASSERT(entry->wired_count == 1, ("wired_count changed unexpectedly"));
2244                                         entry->wired_count = 0;
2245                                         if (entry->end == save_end)
2246                                                 break;
2247                                         entry = entry->next;
2248                                         KASSERT(entry != &map->header, ("bad entry clip during backout"));
2249                                 }
2250                                 end = save_start;
2251                                 break;
2252                         }
2253                         CLIP_CHECK_FWD(entry, save_end);
2254                         entry = entry->next;
2255                 }
2256                 crit_exit();
2257
2258                 /*
2259                  * If a failure occurred, undo everything by falling through
2260                  * to the unwiring code.  'end' has already been adjusted
2261                  * appropriately.
2262                  */
2263                 if (rv)
2264                         kmflags |= KM_PAGEABLE;
2265
2266                 /*
2267                  * start_entry is still IN_TRANSITION but may have been 
2268                  * clipped since vm_fault_wire() unlocks and relocks the
2269                  * map.  No matter how clipped it has gotten there should
2270                  * be a fragment that is on our start boundary.
2271                  */
2272                 CLIP_CHECK_BACK(start_entry, start);
2273         }
2274
2275         if (kmflags & KM_PAGEABLE) {
2276                 /*
2277                  * This is the unwiring case.  We must first ensure that the
2278                  * range to be unwired is really wired down.  We know there
2279                  * are no holes.
2280                  */
2281                 entry = start_entry;
2282                 while ((entry != &map->header) && (entry->start < end)) {
2283                         if (entry->wired_count == 0) {
2284                                 rv = KERN_INVALID_ARGUMENT;
2285                                 goto done;
2286                         }
2287                         entry = entry->next;
2288                 }
2289
2290                 /*
2291                  * Now decrement the wiring count for each region. If a region
2292                  * becomes completely unwired, unwire its physical pages and
2293                  * mappings.
2294                  */
2295                 entry = start_entry;
2296                 while ((entry != &map->header) && (entry->start < end)) {
2297                         entry->wired_count--;
2298                         if (entry->wired_count == 0)
2299                                 vm_fault_unwire(map, entry);
2300                         entry = entry->next;
2301                 }
2302         }
2303 done:
2304         vm_map_unclip_range(map, start_entry, start, real_end, &count,
2305                 MAP_CLIP_NO_HOLES);
2306         map->timestamp++;
2307         vm_map_unlock(map);
2308 failure:
2309         if (kmflags & KM_KRESERVE)
2310                 vm_map_entry_krelease(count);
2311         else
2312                 vm_map_entry_release(count);
2313         return (rv);
2314 }
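
/*
 * Illustrative sketch, not compiled: wiring and later unwiring a
 * kernel range.  KM_KRESERVE selects the kernel reserve pool for
 * callers in the zalloc path; KM_PAGEABLE requests unwiring.
 *
 *      rv = vm_map_wire(map, addr, addr + size, 0);
 *      ...
 *      rv = vm_map_wire(map, addr, addr + size, KM_PAGEABLE);
 */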
2315
2316 /*
2317  * vm_map_set_wired_quick()
2318  *
2319  *      Mark a newly allocated address range as wired but do not fault in
2320  *      the pages.  The caller is expected to load the pages into the object.
2321  *
2322  *      The map must be locked on entry and will remain locked on return.
2323  */
2324 void
2325 vm_map_set_wired_quick(vm_map_t map, vm_offset_t addr, vm_size_t size, int *countp)
2326 {
2327         vm_map_entry_t scan;
2328         vm_map_entry_t entry;
2329
2330         entry = vm_map_clip_range(map, addr, addr + size, countp, MAP_CLIP_NO_HOLES);
2331         for (scan = entry; scan != &map->header && scan->start < addr + size; scan = scan->next) {
2332                 KKASSERT(scan->wired_count == 0);
2333                 scan->wired_count = 1;
2334         }
2335         vm_map_unclip_range(map, entry, addr, addr + size, countp, MAP_CLIP_NO_HOLES);
2336 }
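
/*
 * Illustrative sketch, not compiled: a caller that has just allocated
 * [addr, addr + size) and will load the pages itself.
 *
 *      count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
 *      vm_map_lock(map);
 *      vm_map_set_wired_quick(map, addr, size, &count);
 *      vm_map_unlock(map);
 *      vm_map_entry_release(count);
 */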
2337
2338 /*
2339  * vm_map_clean
2340  *
2341  * Push any dirty cached pages in the address range to their pager.
2342  * If syncio is TRUE, dirty pages are written synchronously.
2343  * If invalidate is TRUE, any cached pages are freed as well.
2344  *
2345  * Returns an error if any part of the specified range is not mapped.
2346  */
2347 int
2348 vm_map_clean(vm_map_t map, vm_offset_t start, vm_offset_t end, boolean_t syncio,
2349     boolean_t invalidate)
2350 {
2351         vm_map_entry_t current;
2352         vm_map_entry_t entry;
2353         vm_size_t size;
2354         vm_object_t object;
2355         vm_ooffset_t offset;
2356
2357         vm_map_lock_read(map);
2358         VM_MAP_RANGE_CHECK(map, start, end);
2359         if (!vm_map_lookup_entry(map, start, &entry)) {
2360                 vm_map_unlock_read(map);
2361                 return (KERN_INVALID_ADDRESS);
2362         }
2363         /*
2364          * Make a first pass to check for holes.
2365          */
2366         for (current = entry; current->start < end; current = current->next) {
2367                 if (current->maptype == VM_MAPTYPE_SUBMAP) {
2368                         vm_map_unlock_read(map);
2369                         return (KERN_INVALID_ARGUMENT);
2370                 }
2371                 if (end > current->end &&
2372                     (current->next == &map->header ||
2373                         current->end != current->next->start)) {
2374                         vm_map_unlock_read(map);
2375                         return (KERN_INVALID_ADDRESS);
2376                 }
2377         }
2378
2379         if (invalidate)
2380                 pmap_remove(vm_map_pmap(map), start, end);
2381         /*
2382          * Make a second pass, cleaning/uncaching pages from the indicated
2383          * objects as we go.
2384          */
2385         for (current = entry; current->start < end; current = current->next) {
2386                 offset = current->offset + (start - current->start);
2387                 size = (end <= current->end ? end : current->end) - start;
2388                 if (current->maptype == VM_MAPTYPE_SUBMAP) {
2389                         vm_map_t smap;
2390                         vm_map_entry_t tentry;
2391                         vm_size_t tsize;
2392
2393                         smap = current->object.sub_map;
2394                         vm_map_lock_read(smap);
2395                         vm_map_lookup_entry(smap, offset, &tentry);
2396                         tsize = tentry->end - offset;
2397                         if (tsize < size)
2398                                 size = tsize;
2399                         object = tentry->object.vm_object;
2400                         offset = tentry->offset + (offset - tentry->start);
2401                         vm_map_unlock_read(smap);
2402                 } else {
2403                         object = current->object.vm_object;
2404                 }
2405                 /*
2406                  * Note that there is absolutely no sense in writing out
2407                  * anonymous objects, so we track down the vnode object
2408                  * to write out.
2409                  * We invalidate (remove) all pages from the address space
2410                  * anyway, for semantic correctness.
2411                  *
2412                  * note: certain anonymous maps, such as MAP_NOSYNC maps,
2413                  * may start out with a NULL object.
2414                  */
2415                 while (object && object->backing_object) {
2416                         offset += object->backing_object_offset;
2417                         object = object->backing_object;
2418                         if (object->size < OFF_TO_IDX( offset + size))
2419                                 size = IDX_TO_OFF(object->size) - offset;
2420                 }
2421                 if (object && (object->type == OBJT_VNODE) && 
2422                     (current->protection & VM_PROT_WRITE)) {
2423                         /*
2424                          * Flush pages if writing is allowed, invalidate them
2425                          * if invalidation requested.  Pages undergoing I/O
2426                          * will be ignored by vm_object_page_remove().
2427                          *
2428                          * We cannot lock the vnode and then wait for paging
2429                          * to complete without deadlocking against vm_fault.
2430                          * Instead we simply call vm_object_page_remove() and
2431                          * allow it to block internally on a page-by-page 
2432                          * basis when it encounters pages undergoing async 
2433                          * I/O.
2434                          */
2435                         int flags;
2436
2437                         vm_object_reference(object);
2438                         vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY);
2439                         flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
2440                         flags |= invalidate ? OBJPC_INVAL : 0;
2441
2442                         /*
2443                          * When operating on a virtual page table just
2444                          * flush the whole object.  XXX we probably ought
2445                          * to 
2446                          */
2447                         switch(current->maptype) {
2448                         case VM_MAPTYPE_NORMAL:
2449                                 vm_object_page_clean(object,
2450                                     OFF_TO_IDX(offset),
2451                                     OFF_TO_IDX(offset + size + PAGE_MASK),
2452                                     flags);
2453                                 break;
2454                         case VM_MAPTYPE_VPAGETABLE:
2455                                 vm_object_page_clean(object, 0, 0, flags);
2456                                 break;
2457                         }
2458                         vn_unlock((struct vnode *)object->handle);
2459                         vm_object_deallocate(object);
2460                 }
2461                 if (object && invalidate &&
2462                    ((object->type == OBJT_VNODE) ||
2463                     (object->type == OBJT_DEVICE))) {
2464                         int clean_only = 
2465                                 (object->type == OBJT_DEVICE) ? FALSE : TRUE;
2466                         vm_object_reference(object);
2467                         switch(current->maptype) {
2468                         case VM_MAPTYPE_NORMAL:
2469                                 vm_object_page_remove(object,
2470                                     OFF_TO_IDX(offset),
2471                                     OFF_TO_IDX(offset + size + PAGE_MASK),
2472                                     clean_only);
2473                                 break;
2474                         case VM_MAPTYPE_VPAGETABLE:
2475                                 vm_object_page_remove(object, 0, 0, clean_only);
2476                                 break;
2477                         }
2478                         vm_object_deallocate(object);
2479                 }
2480                 start += size;
2481         }
2482
2483         vm_map_unlock_read(map);
2484         return (KERN_SUCCESS);
2485 }
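
/*
 * Illustrative sketch, not compiled: msync(2)-style flushes.  The
 * first call writes dirty pages synchronously and keeps them cached;
 * the second also invalidates the cached pages.
 *
 *      rv = vm_map_clean(map, start, end, TRUE, FALSE);
 *      rv = vm_map_clean(map, start, end, TRUE, TRUE);
 */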
2486
2487 /*
2488  *      vm_map_entry_unwire:    [ internal use only ]
2489  *
2490  *      Make the region specified by this entry pageable.
2491  *
2492  *      The map in question should be locked.
2493  *      [This is the reason for this routine's existence.]
2494  */
2495 static void 
2496 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
2497 {
2498         entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2499         entry->wired_count = 0;
2500         vm_fault_unwire(map, entry);
2501 }
2502
2503 /*
2504  *      vm_map_entry_delete:    [ internal use only ]
2505  *
2506  *      Deallocate the given entry from the target map.
2507  */
2508 static void
2509 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry, int *countp)
2510 {
2511         vm_map_entry_unlink(map, entry);
2512         map->size -= entry->end - entry->start;
2513
2514         switch(entry->maptype) {
2515         case VM_MAPTYPE_NORMAL:
2516         case VM_MAPTYPE_VPAGETABLE:
2517                 vm_object_deallocate(entry->object.vm_object);
2518                 break;
2519         default:
2520                 break;
2521         }
2522
2523         vm_map_entry_dispose(map, entry, countp);
2524 }
2525
2526 /*
2527  *      vm_map_delete:  [ internal use only ]
2528  *
2529  *      Deallocates the given address range from the target
2530  *      map.
2531  */
2532 int
2533 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end, int *countp)
2534 {
2535         vm_object_t object;
2536         vm_map_entry_t entry;
2537         vm_map_entry_t first_entry;
2538
2539 again:
2540         /*
2541          * Find the start of the region, and clip it.  Set entry to point
2542          * at the first record containing the requested address or, if no
2543          * such record exists, the next record with a greater address.  The
2544          * loop will run from this point until a record beyond the termination
2545          * address is encountered.
2546          *
2547          * map->hint must be adjusted to not point to anything we delete,
2548          * so set it to the entry prior to the one being deleted.
2549          *
2550          * GGG see other GGG comment.
2551          */
2552         if (vm_map_lookup_entry(map, start, &first_entry)) {
2553                 entry = first_entry;
2554                 vm_map_clip_start(map, entry, start, countp);
2555                 map->hint = entry->prev;        /* possible problem XXX */
2556         } else {
2557                 map->hint = first_entry;        /* possible problem XXX */
2558                 entry = first_entry->next;
2559         }
2560
2561         /*
2562          * If a hole opens up prior to the current first_free then
2563          * adjust first_free.  As with map->hint, map->first_free
2564          * cannot be left set to anything we might delete.
2565          */
2566         if (entry == &map->header) {
2567                 map->first_free = &map->header;
2568         } else if (map->first_free->start >= start) {
2569                 map->first_free = entry->prev;
2570         }
2571
2572         /*
2573          * Step through all entries in this region
2574          */
2575
2576         while ((entry != &map->header) && (entry->start < end)) {
2577                 vm_map_entry_t next;
2578                 vm_offset_t s, e;
2579                 vm_pindex_t offidxstart, offidxend, count;
2580
2581                 /*
2582                  * If we hit an in-transition entry we have to sleep and
2583                  * retry.  It's easier (and not really slower) to just retry
2584                  * since this case occurs so rarely and the hint is already
2585                  * pointing at the right place.  We have to reset the
2586                  * start offset so as not to accidentally delete an entry
2587                  * another process just created in vacated space.
2588                  */
2589                 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
2590                         entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2591                         start = entry->start;
2592                         ++mycpu->gd_cnt.v_intrans_coll;
2593                         ++mycpu->gd_cnt.v_intrans_wait;
2594                         vm_map_transition_wait(map);
2595                         goto again;
2596                 }
2597                 vm_map_clip_end(map, entry, end, countp);
2598
2599                 s = entry->start;
2600                 e = entry->end;
2601                 next = entry->next;
2602
2603                 offidxstart = OFF_TO_IDX(entry->offset);
2604                 count = OFF_TO_IDX(e - s);
2605                 object = entry->object.vm_object;
2606
2607                 /*
2608                  * Unwire before removing addresses from the pmap; otherwise,
2609                  * unwiring will put the entries back in the pmap.
2610                  */
2611                 if (entry->wired_count != 0)
2612                         vm_map_entry_unwire(map, entry);
2613
2614                 offidxend = offidxstart + count;
2615
2616                 if (object == &kernel_object) {
2617                         vm_object_page_remove(object, offidxstart, offidxend, FALSE);
2618                 } else {
2619                         pmap_remove(map->pmap, s, e);
2620                         if (object != NULL &&
2621                             object->ref_count != 1 &&
2622                             (object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING &&
2623                             (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
2624                                 vm_object_collapse(object);
2625                                 vm_object_page_remove(object, offidxstart, offidxend, FALSE);
2626                                 if (object->type == OBJT_SWAP) {
2627                                         swap_pager_freespace(object, offidxstart, count);
2628                                 }
2629                                 if (offidxend >= object->size &&
2630                                     offidxstart < object->size) {
2631                                         object->size = offidxstart;
2632                                 }
2633                         }
2634                 }
2635
2636                 /*
2637                  * Delete the entry (which may delete the object) only after
2638                  * removing all pmap entries pointing to its pages.
2639                  * (Otherwise, its page frames may be reallocated, and any
2640                  * modify bits will be set in the wrong object!)
2641                  */
2642                 vm_map_entry_delete(map, entry, countp);
2643                 entry = next;
2644         }
2645         return (KERN_SUCCESS);
2646 }
2647
2648 /*
2649  *      vm_map_remove:
2650  *
2651  *      Remove the given address range from the target map.
2652  *      This is the exported form of vm_map_delete.
2653  */
2654 int
2655 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
2656 {
2657         int result;
2658         int count;
2659
2660         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
2661         vm_map_lock(map);
2662         VM_MAP_RANGE_CHECK(map, start, end);
2663         result = vm_map_delete(map, start, end, &count);
2664         vm_map_unlock(map);
2665         vm_map_entry_release(count);
2666
2667         return (result);
2668 }
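
/*
 * Illustrative sketch, not compiled: an munmap(2)-style teardown of a
 * page-aligned range, with 'map', 'addr', and 'size' caller-supplied.
 *
 *      result = vm_map_remove(map, addr, addr + size);
 */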
2669
2670 /*
2671  *      vm_map_check_protection:
2672  *
2673  *      Assert that the target map allows the specified
2674  *      privilege on the entire address region given.
2675  *      The entire region must be allocated.
2676  */
2677 boolean_t
2678 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
2679                         vm_prot_t protection)
2680 {
2681         vm_map_entry_t entry;
2682         vm_map_entry_t tmp_entry;
2683
2684         if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
2685                 return (FALSE);
2686         }
2687         entry = tmp_entry;
2688
2689         while (start < end) {
2690                 if (entry == &map->header) {
2691                         return (FALSE);
2692                 }
2693                 /*
2694                  * No holes allowed!
2695                  */
2696
2697                 if (start < entry->start) {
2698                         return (FALSE);
2699                 }
2700                 /*
2701                  * Check protection associated with entry.
2702                  */
2703
2704                 if ((entry->protection & protection) != protection) {
2705                         return (FALSE);
2706                 }
2707                 /* go to next entry */
2708
2709                 start = entry->end;
2710                 entry = entry->next;
2711         }
2712         return (TRUE);
2713 }
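
/*
 * Illustrative sketch, not compiled: verifying read permission across
 * an entire range before operating on it.
 *
 *      if (!vm_map_check_protection(map, start, end, VM_PROT_READ))
 *              return (EFAULT);
 */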

/*
 * Split the pages in a map entry into a new object.  This affords
 * easier removal of unused pages, and keeps object inheritance from
 * having a negative impact on memory usage.
 */
static void
vm_map_split(vm_map_entry_t entry)
{
        vm_page_t m;
        vm_object_t orig_object, new_object, source;
        vm_offset_t s, e;
        vm_pindex_t offidxstart, offidxend, idx;
        vm_size_t size;
        vm_ooffset_t offset;

        orig_object = entry->object.vm_object;
        if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP)
                return;
        if (orig_object->ref_count <= 1)
                return;

        offset = entry->offset;
        s = entry->start;
        e = entry->end;

        offidxstart = OFF_TO_IDX(offset);
        offidxend = offidxstart + OFF_TO_IDX(e - s);
        size = offidxend - offidxstart;

        new_object = vm_pager_allocate(orig_object->type, NULL,
                                       IDX_TO_OFF(size), VM_PROT_ALL, 0);
        if (new_object == NULL)
                return;

        source = orig_object->backing_object;
        if (source != NULL) {
                vm_object_reference(source);    /* Referenced by new_object */
                LIST_INSERT_HEAD(&source->shadow_head,
                                  new_object, shadow_list);
                vm_object_clear_flag(source, OBJ_ONEMAPPING);
                new_object->backing_object_offset =
                        orig_object->backing_object_offset + IDX_TO_OFF(offidxstart);
                new_object->backing_object = source;
                source->shadow_count++;
                source->generation++;
        }

        for (idx = 0; idx < size; idx++) {
                /*
                 * A critical section is required to avoid a race between
                 * the lookup and an interrupt/unbusy/free and our busy
                 * check.
                 */
                crit_enter();
        retry:
                m = vm_page_lookup(orig_object, offidxstart + idx);
                if (m == NULL) {
                        crit_exit();
                        continue;
                }

                /*
                 * We must wait for pending I/O to complete before we can
                 * rename the page.
                 *
                 * We do not have to VM_PROT_NONE the page as mappings should
                 * not be changed by this operation.
                 */
                if (vm_page_sleep_busy(m, TRUE, "spltwt"))
                        goto retry;
                vm_page_busy(m);
                vm_page_rename(m, new_object, idx);
                /*
                 * The page is automatically made dirty by the rename,
                 * which also handles the cache state.  It stays busy
                 * until the wakeup loop below.
                 */
                crit_exit();
        }

        if (orig_object->type == OBJT_SWAP) {
                vm_object_pip_add(orig_object, 1);
                /*
                 * copy orig_object pages into new_object
                 * and destroy unneeded pages in
                 * shadow object.
                 */
                swap_pager_copy(orig_object, new_object, offidxstart, 0);
                vm_object_pip_wakeup(orig_object);
        }

        /*
         * Wakeup the pages we played with.  No spl protection is needed
         * for a simple wakeup.
         */
        for (idx = 0; idx < size; idx++) {
                m = vm_page_lookup(new_object, idx);
                if (m)
                        vm_page_wakeup(m);
        }

        entry->object.vm_object = new_object;
        entry->offset = 0LL;
        vm_object_deallocate(orig_object);
}
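
/*
 * Illustration of the split (descriptive only).  Before, two or more
 * map entries share orig_object; afterwards this entry references
 * new_object, which received the renamed pages (and, for OBJT_SWAP,
 * the copied swap blocks), while the other mappings keep orig_object:
 *
 *      before:  entry A --+
 *               entry B --+--> orig_object --> source
 *
 *      after:   entry A --> new_object  --> source
 *               entry B --> orig_object --> source
 */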

/*
 *      vm_map_copy_entry:
 *
 *      Copies the contents of the source entry to the destination
 *      entry.  The entries *must* be aligned properly.
 */
static void
vm_map_copy_entry(vm_map_t src_map, vm_map_t dst_map,
        vm_map_entry_t src_entry, vm_map_entry_t dst_entry)
{
        vm_object_t src_object;

        if (dst_entry->maptype == VM_MAPTYPE_SUBMAP)
                return;
        if (src_entry->maptype == VM_MAPTYPE_SUBMAP)
                return;

        if (src_entry->wired_count == 0) {
                /*
                 * If the source entry is marked needs_copy, it is already
                 * write-protected.
                 */
                if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
                        pmap_protect(src_map->pmap,
                            src_entry->start,
                            src_entry->end,
                            src_entry->protection & ~VM_PROT_WRITE);
                }

                /*
                 * Make a copy of the object.
                 */
                if ((src_object = src_entry->object.vm_object) != NULL) {
                        if ((src_object->handle == NULL) &&
                                (src_object->type == OBJT_DEFAULT ||
                                 src_object->type == OBJT_SWAP)) {
                                vm_object_collapse(src_object);
                                if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) {
                                        vm_map_split(src_entry);
                                        src_object = src_entry->object.vm_object;
                                }
                        }

                        vm_object_reference(src_object);
                        vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
                        dst_entry->object.vm_object = src_object;
                        src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
                        dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
                        dst_entry->offset = src_entry->offset;
                } else {
                        dst_entry->object.vm_object = NULL;
                        dst_entry->offset = 0;
                }

                pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
                    dst_entry->end - dst_entry->start, src_entry->start);
        } else {
                /*
                 * Of course, wired down pages can't be set copy-on-write.
                 * Cause wired pages to be copied into the new map by
                 * simulating faults (the new pages are pageable).
                 */
                vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
        }
}
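
/*
 * Net effect of the unwired path above, for reference: both entries
 * end up pointing at the same object with MAP_ENTRY_COW and
 * MAP_ENTRY_NEEDS_COPY set, so the first write fault on either side
 * (see the MAP_ENTRY_NEEDS_COPY handling in vm_map_lookup()) pushes
 * a private shadow object in front of the shared one.
 */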

/*
 * vmspace_fork:
 * Create a new process vmspace structure and vm_map
 * based on those of an existing process.  The new map
 * is based on the old map, according to the inheritance
 * values on the regions in that map.
 *
 * The source map must not be locked.
 */
struct vmspace *
vmspace_fork(struct vmspace *vm1)
{
        struct vmspace *vm2;
        vm_map_t old_map = &vm1->vm_map;
        vm_map_t new_map;
        vm_map_entry_t old_entry;
        vm_map_entry_t new_entry;
        vm_object_t object;
        int count;

        vm_map_lock(old_map);
        old_map->infork = 1;

        /*
         * XXX Note: upcalls are not copied.
         */
        vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset);
        bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
            (caddr_t)&vm1->vm_endcopy - (caddr_t)&vm1->vm_startcopy);
        new_map = &vm2->vm_map; /* XXX */
        new_map->timestamp = 1;

        count = 0;
        old_entry = old_map->header.next;
        while (old_entry != &old_map->header) {
                ++count;
                old_entry = old_entry->next;
        }

        count = vm_map_entry_reserve(count + MAP_RESERVE_COUNT);

        old_entry = old_map->header.next;
        while (old_entry != &old_map->header) {
                if (old_entry->maptype == VM_MAPTYPE_SUBMAP)
                        panic("vm_map_fork: encountered a submap");

                switch (old_entry->inheritance) {
                case VM_INHERIT_NONE:
                        break;

                case VM_INHERIT_SHARE:
                        /*
                         * Clone the entry, creating the shared object if
                         * necessary.
                         */
                        object = old_entry->object.vm_object;
                        if (object == NULL) {
                                vm_map_entry_allocate_object(old_entry);
                                object = old_entry->object.vm_object;
                        }

                        /*
                         * Add the reference before calling vm_map_entry_shadow
                         * to ensure that a shadow object is created.
                         */
                        vm_object_reference(object);
                        if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
                                vm_map_entry_shadow(old_entry);
                                /* Transfer the second reference too. */
                                vm_object_reference(
                                    old_entry->object.vm_object);
                                vm_object_deallocate(object);
                                object = old_entry->object.vm_object;
                        }
                        vm_object_clear_flag(object, OBJ_ONEMAPPING);

                        /*
                         * Clone the entry, referencing the shared object.
                         */
                        new_entry = vm_map_entry_create(new_map, &count);
                        *new_entry = *old_entry;
                        new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
                        new_entry->wired_count = 0;

                        /*
                         * Insert the entry into the new map -- we know we're
                         * inserting at the end of the new map.
                         */
                        vm_map_entry_link(new_map, new_map->header.prev,
                            new_entry);

                        /*
                         * Update the physical map
                         */
                        pmap_copy(new_map->pmap, old_map->pmap,
                            new_entry->start,
                            (old_entry->end - old_entry->start),
                            old_entry->start);
                        break;

                case VM_INHERIT_COPY:
                        /*
                         * Clone the entry and link into the map.
                         */
                        new_entry = vm_map_entry_create(new_map, &count);
                        *new_entry = *old_entry;
                        new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
                        new_entry->wired_count = 0;
                        new_entry->object.vm_object = NULL;
                        vm_map_entry_link(new_map, new_map->header.prev,
                            new_entry);
                        vm_map_copy_entry(old_map, new_map, old_entry,
                            new_entry);
                        break;
                }
                old_entry = old_entry->next;
        }

        new_map->size = old_map->size;
        old_map->infork = 0;
        vm_map_unlock(old_map);
        vm_map_entry_release(count);

        return (vm2);
}
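
/*
 * Typical use (a sketch; fork1() is the expected caller and the
 * surrounding names are assumptions):
 *
 *      p2->p_vmspace = vmspace_fork(p1->p_vmspace);
 *
 * VM_INHERIT_SHARE entries share their object with the parent,
 * VM_INHERIT_COPY entries become copy-on-write via
 * vm_map_copy_entry(), and VM_INHERIT_NONE entries simply do not
 * appear in the child map.
 */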

int
vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
             int flags, vm_prot_t prot, vm_prot_t max, int cow)
{
        vm_map_entry_t  prev_entry;
        vm_map_entry_t  new_stack_entry;
        vm_size_t       init_ssize;
        int             rv;
        int             count;
        vm_offset_t     tmpaddr;

        cow |= MAP_IS_STACK;

        if (max_ssize < sgrowsiz)
                init_ssize = max_ssize;
        else
                init_ssize = sgrowsiz;

        count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
        vm_map_lock(map);

        /*
         * Find space for the mapping
         */
        if ((flags & MAP_FIXED) == 0) {
                if (vm_map_findspace(map, addrbos, max_ssize, 1,
                                     flags, &tmpaddr)) {
                        vm_map_unlock(map);
                        vm_map_entry_release(count);
                        return (KERN_NO_SPACE);
                }
                addrbos = tmpaddr;
        }

        /* If addr is already mapped, no go */
        if (vm_map_lookup_entry(map, addrbos, &prev_entry)) {
                vm_map_unlock(map);
                vm_map_entry_release(count);
                return (KERN_NO_SPACE);
        }

#if 0
        /* XXX already handled by kern_mmap() */
        /* If we would blow our VMEM resource limit, no go */
        if (map->size + init_ssize >
            curproc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
                vm_map_unlock(map);
                vm_map_entry_release(count);
                return (KERN_NO_SPACE);
        }
#endif

        /*
         * If we can't accommodate max_ssize in the current mapping,
         * no go.  However, we need to be aware that subsequent user
         * mappings might map into the space we have reserved for
         * the stack, and currently this space is not protected.
         *
         * Hopefully we will at least detect this condition
         * when we try to grow the stack.
         */
        if ((prev_entry->next != &map->header) &&
            (prev_entry->next->start < addrbos + max_ssize)) {
                vm_map_unlock(map);
                vm_map_entry_release(count);
                return (KERN_NO_SPACE);
        }

        /*
         * We initially map a stack of only init_ssize.  We will
         * grow as needed later.  Since this is to be a grow
         * down stack, we map at the top of the range.
         *
         * Note: we would normally expect prot and max to be
         * VM_PROT_ALL, and cow to be 0.  Possibly we should
         * eliminate these as input parameters, and just
         * pass these values here in the insert call.
         */
        rv = vm_map_insert(map, &count,
                           NULL, 0, addrbos + max_ssize - init_ssize,
                           addrbos + max_ssize,
                           VM_MAPTYPE_NORMAL,
                           prot, max,
                           cow);

        /* Now set the avail_ssize amount */
        if (rv == KERN_SUCCESS) {
                if (prev_entry != &map->header)
                        vm_map_clip_end(map, prev_entry,
                                        addrbos + max_ssize - init_ssize,
                                        &count);
                new_stack_entry = prev_entry->next;
                if (new_stack_entry->end   != addrbos + max_ssize ||
                    new_stack_entry->start != addrbos + max_ssize - init_ssize)
                        panic("Bad entry start/end for new stack entry");
                else
                        new_stack_entry->aux.avail_ssize = max_ssize - init_ssize;
        }

        vm_map_unlock(map);
        vm_map_entry_release(count);
        return (rv);
}
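
/*
 * Illustrative route into vm_map_stack() (a sketch; the precise flag
 * plumbing lives in kern_mmap()).  A user request such as
 *
 *      addr = mmap(NULL, maxsize, PROT_READ | PROT_WRITE,
 *                  MAP_STACK, -1, 0);
 *
 * reserves maxsize bytes of address space but initially populates
 * only init_ssize at the top of the range; vm_map_growstack() then
 * extends the mapping downward on demand.
 */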

/*
 * Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if the
 * desired address is already mapped, or if we successfully grow
 * the stack.  Also returns KERN_SUCCESS if addr is outside the
 * stack range (this is strange, but preserves compatibility with
 * the grow function in vm_machdep.c).
 */
int
vm_map_growstack(struct proc *p, vm_offset_t addr)
{
        vm_map_entry_t prev_entry;
        vm_map_entry_t stack_entry;
        vm_map_entry_t new_stack_entry;
        struct vmspace *vm = p->p_vmspace;
        vm_map_t map = &vm->vm_map;
        vm_offset_t    end;
        int grow_amount;
        int rv = KERN_SUCCESS;
        int is_procstack;
        int use_read_lock = 1;
        int count;

        count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
Retry:
        if (use_read_lock)
                vm_map_lock_read(map);
        else
                vm_map_lock(map);

        /* If addr is already in the entry range, no need to grow. */
        if (vm_map_lookup_entry(map, addr, &prev_entry))
                goto done;

        if ((stack_entry = prev_entry->next) == &map->header)
                goto done;
        if (prev_entry == &map->header)
                end = stack_entry->start - stack_entry->aux.avail_ssize;
        else
                end = prev_entry->end;

        /*
         * This next test mimics the old grow function in vm_machdep.c.
         * It really doesn't quite make sense, but we do it anyway
         * for compatibility.
         *
         * If not a growable stack, return success.  This signals the
         * caller to proceed as it normally would with normal vm.
         */
        if (stack_entry->aux.avail_ssize < 1 ||
            addr >= stack_entry->start ||
            addr <  stack_entry->start - stack_entry->aux.avail_ssize) {
                goto done;
        }

        /* Find the minimum grow amount */
        grow_amount = roundup(stack_entry->start - addr, PAGE_SIZE);
        if (grow_amount > stack_entry->aux.avail_ssize) {
                rv = KERN_NO_SPACE;
                goto done;
        }

        /*
         * If there is no longer enough space between the entries, no
         * go, and adjust the available space.  Note: this should only
         * happen if the user has mapped into the stack area after the
         * stack was created, and is probably an error.
         *
         * This also effectively destroys any guard page the user
         * might have intended by limiting the stack size.
         */
        if (grow_amount > stack_entry->start - end) {
                if (use_read_lock && vm_map_lock_upgrade(map)) {
                        use_read_lock = 0;
                        goto Retry;
                }
                use_read_lock = 0;
                stack_entry->aux.avail_ssize = stack_entry->start - end;
                rv = KERN_NO_SPACE;
                goto done;
        }

        is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr;

        /*
         * If this is the main process stack, see if we're over the
         * stack limit.
         */
        if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
                             p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
                rv = KERN_NO_SPACE;
                goto done;
        }

        /* Round up the grow amount to a multiple of sgrowsiz */
        grow_amount = roundup(grow_amount, sgrowsiz);
        if (grow_amount > stack_entry->aux.avail_ssize) {
                grow_amount = stack_entry->aux.avail_ssize;
        }
        if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
                             p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
                grow_amount = p->p_rlimit[RLIMIT_STACK].rlim_cur -
                              ctob(vm->vm_ssize);
        }

        /* If we would blow our VMEM resource limit, no go */
        if (map->size + grow_amount > p->p_rlimit[RLIMIT_VMEM].rlim_cur) {
                rv = KERN_NO_SPACE;
                goto done;
        }

        if (use_read_lock && vm_map_lock_upgrade(map)) {
                use_read_lock = 0;
                goto Retry;
        }
        use_read_lock = 0;

        /* Get the preliminary new entry start value */
        addr = stack_entry->start - grow_amount;

        /*
         * If this puts us into the previous entry, cut back our growth
         * to the available space.  Also, see the note above.
         */
        if (addr < end) {
                stack_entry->aux.avail_ssize = stack_entry->start - end;
                addr = end;
        }

        rv = vm_map_insert(map, &count,
                           NULL, 0, addr, stack_entry->start,
                           VM_MAPTYPE_NORMAL,
                           VM_PROT_ALL, VM_PROT_ALL,
                           0);

        /* Adjust the available stack space by the amount we grew. */
        if (rv == KERN_SUCCESS) {
                if (prev_entry != &map->header)
                        vm_map_clip_end(map, prev_entry, addr, &count);
                new_stack_entry = prev_entry->next;
                if (new_stack_entry->end   != stack_entry->start  ||
                    new_stack_entry->start != addr)
                        panic("Bad stack grow start/end in new stack entry");
                else {
                        new_stack_entry->aux.avail_ssize =
                                stack_entry->aux.avail_ssize -
                                (new_stack_entry->end - new_stack_entry->start);
                        if (is_procstack)
                                vm->vm_ssize += btoc(new_stack_entry->end -
                                                     new_stack_entry->start);
                }
        }

done:
        if (use_read_lock)
                vm_map_unlock_read(map);
        else
                vm_map_unlock(map);
        vm_map_entry_release(count);
        return (rv);
}
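
/*
 * Illustrative caller (a sketch; the real call site sits in the page
 * fault path ahead of vm_fault()):
 *
 *      if (vm_map_growstack(p, va) != KERN_SUCCESS)
 *              return (KERN_FAILURE);
 *
 * Note the deliberately loose contract described above: KERN_SUCCESS
 * is also returned when va lies outside any growable stack range.
 */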

/*
 * Unshare the specified VM space for exec.  If other processes are
 * mapped to it, then create a new one.  The new vmspace starts out
 * empty unless a resident vmspace template (vmcopy) is supplied, in
 * which case the new vmspace is forked from it.
 */
void
vmspace_exec(struct proc *p, struct vmspace *vmcopy)
{
        struct vmspace *oldvmspace = p->p_vmspace;
        struct vmspace *newvmspace;
        vm_map_t map = &p->p_vmspace->vm_map;

        /*
         * If we are execing a resident vmspace we fork it, otherwise
         * we create a new vmspace.  Note that exitingcnt and upcalls
         * are not copied to the new vmspace.
         */
        if (vmcopy) {
            newvmspace = vmspace_fork(vmcopy);
        } else {
            newvmspace = vmspace_alloc(map->min_offset, map->max_offset);
            bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy,
                (caddr_t)&oldvmspace->vm_endcopy -
                    (caddr_t)&oldvmspace->vm_startcopy);
        }

        /*
         * Finish initializing the vmspace before assigning it
         * to the process.  The vmspace will become the current vmspace
         * if p == curproc.
         */
        pmap_pinit2(vmspace_pmap(newvmspace));
        pmap_replacevm(p, newvmspace, 0);
        sysref_put(&oldvmspace->vm_sysref);
}

/*
 * Unshare the specified VM space for forcing COW.  This
 * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
 *
 * The exitingcnt test is not strictly necessary but has been
 * included for code sanity (to make the code a bit more deterministic).
 */
void
vmspace_unshare(struct proc *p)
{
        struct vmspace *oldvmspace = p->p_vmspace;
        struct vmspace *newvmspace;

        if (oldvmspace->vm_sysref.refcnt == 1 && oldvmspace->vm_exitingcnt == 0)
                return;
        newvmspace = vmspace_fork(oldvmspace);
        pmap_pinit2(vmspace_pmap(newvmspace));
        pmap_replacevm(p, newvmspace, 0);
        sysref_put(&oldvmspace->vm_sysref);
}

/*
 *      vm_map_lookup:
 *
 *      Finds the VM object, offset, and
 *      protection for a given virtual address in the
 *      specified map, assuming a page fault of the
 *      type specified.
 *
 *      Leaves the map in question locked for read; return
 *      values are guaranteed until a vm_map_lookup_done
 *      call is performed.  Note that the map argument
 *      is in/out; the returned map must be used in
 *      the call to vm_map_lookup_done.
 *
 *      A handle (out_entry) is returned for use in
 *      vm_map_lookup_done, to make that fast.
 *
 *      If a lookup is requested with "write protection"
 *      specified, the map may be changed to perform virtual
 *      copying operations, although the data referenced will
 *      remain the same.
 */
int
vm_map_lookup(vm_map_t *var_map,                /* IN/OUT */
              vm_offset_t vaddr,
              vm_prot_t fault_typea,
              vm_map_entry_t *out_entry,        /* OUT */
              vm_object_t *object,              /* OUT */
              vm_pindex_t *pindex,              /* OUT */
              vm_prot_t *out_prot,              /* OUT */
              boolean_t *wired)                 /* OUT */
{
        vm_map_entry_t entry;
        vm_map_t map = *var_map;
        vm_prot_t prot;
        vm_prot_t fault_type = fault_typea;
        int use_read_lock = 1;
        int rv = KERN_SUCCESS;

RetryLookup:
        if (use_read_lock)
                vm_map_lock_read(map);
        else
                vm_map_lock(map);

        /*
         * If the map has an interesting hint, try it before calling the
         * full blown lookup routine.
         */
        entry = map->hint;
        *out_entry = entry;

        if ((entry == &map->header) ||
            (vaddr < entry->start) || (vaddr >= entry->end)) {
                vm_map_entry_t tmp_entry;

                /*
                 * Entry was either not a valid hint, or the vaddr was not
                 * contained in the entry, so do a full lookup.
                 */
                if (!vm_map_lookup_entry(map, vaddr, &tmp_entry)) {
                        rv = KERN_INVALID_ADDRESS;
                        goto done;
                }

                entry = tmp_entry;
                *out_entry = entry;
        }

        /*
         * Handle submaps.
         */
        if (entry->maptype == VM_MAPTYPE_SUBMAP) {
                vm_map_t old_map = map;

                *var_map = map = entry->object.sub_map;
                if (use_read_lock)
                        vm_map_unlock_read(old_map);
                else
                        vm_map_unlock(old_map);
                use_read_lock = 1;
                goto RetryLookup;
        }

        /*
         * Check whether this task is allowed to have this page.
         * Note the special case for MAP_ENTRY_COW
         * pages with an override.  This is to implement a forced
         * COW for debuggers.
         */
        if (fault_type & VM_PROT_OVERRIDE_WRITE)
                prot = entry->max_protection;
        else
                prot = entry->protection;

        fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
        if ((fault_type & prot) != fault_type) {
                rv = KERN_PROTECTION_FAILURE;
                goto done;
        }

        if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
            (entry->eflags & MAP_ENTRY_COW) &&
            (fault_type & VM_PROT_WRITE) &&
            (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) {
                rv = KERN_PROTECTION_FAILURE;
                goto done;
        }

        /*
         * If this page is not pageable, we have to get it for all possible
         * accesses.
         */
        *wired = (entry->wired_count != 0);
        if (*wired)
                prot = fault_type = entry->protection;

        /*
         * Virtual page tables may need to update the accessed (A) bit
         * in a page table entry.  Upgrade the fault to a write fault for
         * that case if the map will support it.  If the map does not support
         * it the page table entry simply will not be updated.
         */
        if (entry->maptype == VM_MAPTYPE_VPAGETABLE) {
                if (prot & VM_PROT_WRITE)
                        fault_type |= VM_PROT_WRITE;
        }

        /*
         * If the entry was copy-on-write, we either make a shadow object
         * now (for a write fault) or demote the permitted access (for a
         * read fault).
         */
        if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
                /*
                 * If we want to write the page, we may as well handle that
                 * now since we've got the map locked.
                 *
                 * If we don't need to write the page, we just demote the
                 * permissions allowed.
                 */
                if (fault_type & VM_PROT_WRITE) {
                        /*
                         * Make a new object, and place it in the object
                         * chain.  Note that no new references have appeared
                         * -- one just moved from the map to the new
                         * object.
                         */
                        if (use_read_lock && vm_map_lock_upgrade(map)) {
                                use_read_lock = 0;
                                goto RetryLookup;
                        }
                        use_read_lock = 0;

                        vm_map_entry_shadow(entry);
                } else {
                        /*
                         * We're attempting to read a copy-on-write page --
                         * don't allow writes.
                         */
                        prot &= ~VM_PROT_WRITE;
                }
        }

        /*
         * Create an object if necessary.
         */
        if (entry->object.vm_object == NULL &&
            !map->system_map) {
                if (use_read_lock && vm_map_lock_upgrade(map)) {
                        use_read_lock = 0;
                        goto RetryLookup;
                }
                use_read_lock = 0;
                vm_map_entry_allocate_object(entry);
        }

        /*
         * Return the object/offset from this entry.  If the entry was
         * copy-on-write or empty, it has been fixed up.
         */
        *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
        *object = entry->object.vm_object;

        /*
         * Return the protection.  On success we return with a read
         * lock held on the map; on failure we return with the map
         * unlocked.
         */
        *out_prot = prot;
done:
        if (rv == KERN_SUCCESS) {
                if (use_read_lock == 0)
                        vm_map_lock_downgrade(map);
        } else if (use_read_lock) {
                vm_map_unlock_read(map);
        } else {
                vm_map_unlock(map);
        }
        return (rv);
}
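
/*
 * Canonical calling pattern, vm_fault()-style (a sketch; the local
 * names are assumptions):
 *
 *      vm_map_t map = original_map;
 *      vm_map_entry_t entry;
 *      vm_object_t object;
 *      vm_pindex_t pindex;
 *      vm_prot_t prot;
 *      boolean_t wired;
 *      int rv;
 *
 *      rv = vm_map_lookup(&map, vaddr, fault_type, &entry,
 *                         &object, &pindex, &prot, &wired);
 *      if (rv != KERN_SUCCESS)
 *              return (rv);
 *      ... use object and pindex while the read lock is held ...
 *      vm_map_lookup_done(map, entry, 0);
 *
 * Because a submap traversal can replace the map, the value handed
 * back through the first argument is the one that must be passed to
 * vm_map_lookup_done().
 */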

/*
 *      vm_map_lookup_done:
 *
 *      Releases locks acquired by a vm_map_lookup
 *      (according to the handle returned by that lookup).
 */
void
vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry, int count)
{
        /*
         * Unlock the main-level map
         */
        vm_map_unlock_read(map);
        if (count)
                vm_map_entry_release(count);
}

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <ddb/ddb.h>

/*
 *      vm_map_print:   [ debug ]
 */
DB_SHOW_COMMAND(map, vm_map_print)
{
        static int nlines;
        /* XXX convert args. */
        vm_map_t map = (vm_map_t)addr;
        boolean_t full = have_addr;

        vm_map_entry_t entry;

        db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
            (void *)map,
            (void *)map->pmap, map->nentries, map->timestamp);
        nlines++;

        if (!full && db_indent)
                return;

        db_indent += 2;
        for (entry = map->header.next; entry != &map->header;
            entry = entry->next) {
                db_iprintf("map entry %p: start=%p, end=%p\n",
                    (void *)entry, (void *)entry->start, (void *)entry->end);
                nlines++;
                {
                        static char *inheritance_name[4] =
                        {"share", "copy", "none", "donate_copy"};

                        db_iprintf(" prot=%x/%x/%s",
                            entry->protection,
                            entry->max_protection,
                            inheritance_name[(int)(unsigned char)entry->inheritance]);
                        if (entry->wired_count != 0)
                                db_printf(", wired");
                }
                if (entry->maptype == VM_MAPTYPE_SUBMAP) {
                        /* XXX no %qd in kernel.  Truncate entry->offset. */
                        db_printf(", share=%p, offset=0x%lx\n",
                            (void *)entry->object.sub_map,
                            (long)entry->offset);
                        nlines++;
                        if ((entry->prev == &map->header) ||
                            (entry->prev->object.sub_map !=
                                entry->object.sub_map)) {
                                db_indent += 2;
                                vm_map_print((db_expr_t)(intptr_t)
                                             entry->object.sub_map,
                                             full, 0, NULL);
                                db_indent -= 2;
                        }
                } else {
                        /* XXX no %qd in kernel.  Truncate entry->offset. */
                        db_printf(", object=%p, offset=0x%lx",
                            (void *)entry->object.vm_object,
                            (long)entry->offset);
                        if (entry->eflags & MAP_ENTRY_COW)
                                db_printf(", copy (%s)",
                                    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
                        db_printf("\n");
                        nlines++;

                        if ((entry->prev == &map->header) ||
                            (entry->prev->object.vm_object !=
                                entry->object.vm_object)) {
                                db_indent += 2;
                                vm_object_print((db_expr_t)(intptr_t)
                                                entry->object.vm_object,
                                                full, 0, NULL);
                                nlines += 4;
                                db_indent -= 2;
                        }
                }
        }
        db_indent -= 2;
        if (db_indent == 0)
                nlines = 0;
}
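
/*
 * Usage from the in-kernel debugger (illustrative; the address is an
 * example only):
 *
 *      db> show map 0xc1234560
 *
 * When an address is supplied, have_addr is set and the map is
 * dumped in full, including its backing objects and submaps.
 */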

DB_SHOW_COMMAND(procvm, procvm)
{
        struct proc *p;

        if (have_addr) {
                p = (struct proc *) addr;
        } else {
                p = curproc;
        }

        db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
            (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
            (void *)vmspace_pmap(p->p_vmspace));

        vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL);
}

#endif /* DDB */