/*
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: @(#)vm_map.c      8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_map.c,v 1.187.2.19 2003/05/27 00:47:02 alc Exp $
 * $DragonFly: src/sys/vm/vm_map.c,v 1.53 2006/12/28 18:29:08 dillon Exp $
 */

/*
 *      Virtual memory mapping module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/shm.h>
#include <sys/tree.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/swap_pager.h>
#include <vm/vm_zone.h>

#include <sys/thread2.h>

/*
 *      Virtual memory maps provide for the mapping, protection,
 *      and sharing of virtual memory objects.  In addition,
 *      this module provides for an efficient virtual copy of
 *      memory from one map to another.
 *
 *      Synchronization is required prior to most operations.
 *
 *      Maps consist of an ordered doubly-linked list of simple
 *      entries; a single hint is used to speed up lookups.
 *
 *      Since portions of maps are specified by start/end addresses,
 *      which may not align with existing map entries, all
 *      routines merely "clip" entries to these start/end values.
 *      [That is, an entry is split into two, bordering at a
 *      start or end value.]  Note that these clippings may not
 *      always be necessary (as the two resulting entries are then
 *      not changed); however, the clipping is done for convenience.
 *
 *      As mentioned above, virtual copy operations are performed
 *      by copying VM object references from one map to
 *      another, and then marking both regions as copy-on-write.
 */
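
/*
 * Illustrative sketch, not part of the original source: what a clip
 * does to an entry.  Clipping [start, end) against an entry that
 * straddles 'start' splits it into two entries bordering at 'start':
 *
 *      before:  [ entry->start ......................... entry->end )
 *      after:   [ entry->start .. start ) [ start ...... entry->end )
 *
 * Both halves reference the same backing VM object at adjusted
 * offsets, which is why the clip routines below instantiate a backing
 * object first if the entry does not already have one.
 */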

/*
 *      vm_map_startup:
 *
 *      Initialize the vm_map module.  Must be called before
 *      any other vm_map routines.
 *
 *      Map and entry structures are allocated from the general
 *      purpose memory pool with some exceptions:
 *
 *      - The kernel map and kmem submap are allocated statically.
 *      - Kernel map entries are allocated out of a static pool.
 *
 *      These restrictions are necessary since malloc() uses the
 *      maps and requires map entries.
 */

#define VMEPERCPU       2

static struct vm_zone mapentzone_store, mapzone_store;
static vm_zone_t mapentzone, mapzone, vmspace_zone;
static struct vm_object mapentobj, mapobj;

static struct vm_map_entry map_entry_init[MAX_MAPENT];
static struct vm_map_entry cpu_map_entry_init[MAXCPU][VMEPERCPU];
static struct vm_map map_init[MAX_KMAP];

static void vm_map_entry_shadow(vm_map_entry_t entry);
static vm_map_entry_t vm_map_entry_create(vm_map_t map, int *);
static void vm_map_entry_dispose (vm_map_t map, vm_map_entry_t entry, int *);
static void _vm_map_clip_end (vm_map_t, vm_map_entry_t, vm_offset_t, int *);
static void _vm_map_clip_start (vm_map_t, vm_map_entry_t, vm_offset_t, int *);
static void vm_map_entry_delete (vm_map_t, vm_map_entry_t, int *);
static void vm_map_entry_unwire (vm_map_t, vm_map_entry_t);
static void vm_map_copy_entry (vm_map_t, vm_map_t, vm_map_entry_t,
                vm_map_entry_t);
static void vm_map_split (vm_map_entry_t);
static void vm_map_unclip_range (vm_map_t map, vm_map_entry_t start_entry,
                vm_offset_t start, vm_offset_t end, int *count, int flags);

void
vm_map_startup(void)
{
        mapzone = &mapzone_store;
        zbootinit(mapzone, "MAP", sizeof (struct vm_map),
                map_init, MAX_KMAP);
        mapentzone = &mapentzone_store;
        zbootinit(mapentzone, "MAP ENTRY", sizeof (struct vm_map_entry),
                map_entry_init, MAX_MAPENT);
}

/*
 * Red black tree functions
 */
static int rb_vm_map_compare(vm_map_entry_t a, vm_map_entry_t b);
RB_GENERATE(vm_map_rb_tree, vm_map_entry, rb_entry, rb_vm_map_compare);
/* a->start is the address; it is the only field that must be initialized */
static int
rb_vm_map_compare(vm_map_entry_t a, vm_map_entry_t b)
{
        if (a->start < b->start)
                return(-1);
        else if (a->start > b->start)
                return(1);
        return(0);
}

/*
 * Allocate a vmspace structure, including a vm_map and pmap,
 * and initialize those structures.  The refcnt is set to 1.
 * The remaining fields must be initialized by the caller.
 */
struct vmspace *
vmspace_alloc(vm_offset_t min, vm_offset_t max)
{
        struct vmspace *vm;

        vm = zalloc(vmspace_zone);
        bzero(&vm->vm_startcopy,
                (char *)&vm->vm_endcopy - (char *)&vm->vm_startcopy);
        vm_map_init(&vm->vm_map, min, max);
        pmap_pinit(vmspace_pmap(vm));
        vm->vm_map.pmap = vmspace_pmap(vm);             /* XXX */
        vm->vm_refcnt = 1;
        vm->vm_shm = NULL;
        vm->vm_exitingcnt = 0;
        return (vm);
}
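
/*
 * Illustrative sketch, not part of the original source: the expected
 * vmspace lifecycle.  The structure comes back from vmspace_alloc()
 * holding one reference; vmspace_free() drops it, and teardown occurs
 * once both vm_refcnt and vm_exitingcnt have reached zero.  The
 * address bounds below are placeholders, not real constants.
 */
#if 0
static void
example_vmspace_lifecycle(vm_offset_t user_min, vm_offset_t user_max)
{
        struct vmspace *vm;

        vm = vmspace_alloc(user_min, user_max);
        /* ... attach to a process and operate on vm->vm_map ... */
        vmspace_free(vm);               /* drops the initial reference */
}
#endif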

void
vm_init2(void)
{
        zinitna(mapentzone, &mapentobj, NULL, 0, 0,
                ZONE_USE_RESERVE | ZONE_SPECIAL, 1);
        zinitna(mapzone, &mapobj, NULL, 0, 0, 0, 1);
        vmspace_zone = zinit("VMSPACE", sizeof (struct vmspace), 0, 0, 3);
        pmap_init2();
        vm_object_init2();
}

static __inline void
vmspace_dofree(struct vmspace *vm)
{
        int count;

        /*
         * Make sure any SysV shm is freed; it might not have been
         * freed in exit1().
         */
        shmexit(vm);

        KKASSERT(vm->vm_upcalls == NULL);

        /*
         * Lock the map, to wait out all other references to it.
         * Delete all of the mappings and pages they hold, then call
         * the pmap module to reclaim anything left.
         */
        count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
        vm_map_lock(&vm->vm_map);
        vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
                vm->vm_map.max_offset, &count);
        vm_map_unlock(&vm->vm_map);
        vm_map_entry_release(count);

        pmap_release(vmspace_pmap(vm));
        zfree(vmspace_zone, vm);
}

void
vmspace_free(struct vmspace *vm)
{
        if (vm->vm_refcnt == 0)
                panic("vmspace_free: attempt to free already freed vmspace");

        if (--vm->vm_refcnt == 0 && vm->vm_exitingcnt == 0)
                vmspace_dofree(vm);
}

void
vmspace_exitfree(struct proc *p)
{
        struct vmspace *vm;

        vm = p->p_vmspace;
        p->p_vmspace = NULL;

        /*
         * cleanup by parent process wait()ing on exiting child.  vm_refcnt
         * may not be 0 (e.g. fork() and child exits without exec()ing).
         * exitingcnt may increment above 0 and drop back down to zero
         * several times while vm_refcnt is held non-zero.  vm_refcnt
         * may also increment above 0 and drop back down to zero several
         * times while vm_exitingcnt is held non-zero.
         *
         * The last wait on the exiting child's vmspace will clean up
         * the remainder of the vmspace.
         */
        if (--vm->vm_exitingcnt == 0 && vm->vm_refcnt == 0)
                vmspace_dofree(vm);
}

/*
 * vmspace_swap_count() - count the approximate swap usage in pages for a
 *                        vmspace.
 *
 *      Swap usage is determined by taking the proportional swap used by
 *      VM objects backing the VM map.  To make up for fractional losses,
 *      if the VM object has any swap use at all, the associated map entries
 *      count for at least 1 swap page.
 */
int
vmspace_swap_count(struct vmspace *vmspace)
{
        vm_map_t map = &vmspace->vm_map;
        vm_map_entry_t cur;
        vm_object_t object;
        int count = 0;
        int n;

        for (cur = map->header.next; cur != &map->header; cur = cur->next) {
                switch(cur->maptype) {
                case VM_MAPTYPE_NORMAL:
                case VM_MAPTYPE_VPAGETABLE:
                        if ((object = cur->object.vm_object) == NULL)
                                break;
                        if (object->type != OBJT_SWAP)
                                break;
                        n = (cur->end - cur->start) / PAGE_SIZE;
                        if (object->un_pager.swp.swp_bcount) {
                                count += object->un_pager.swp.swp_bcount *
                                    SWAP_META_PAGES * n / object->size + 1;
                        }
                        break;
                default:
                        break;
                }
        }
        return(count);
}
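
/*
 * Worked example of the proportional calculation above (illustrative
 * numbers): an entry mapping n = 64 pages of a 256 page OBJT_SWAP
 * object with swp_bcount = 2 contributes
 *
 *      2 * SWAP_META_PAGES * 64 / 256 + 1
 *
 * pages to the total.  The trailing +1 is the fractional-loss fudge:
 * any object with swap use at all counts for at least one page.
 */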

/*
 *      vm_map_create:
 *
 *      Creates and returns a new empty VM map with
 *      the given physical map structure, and having
 *      the given lower and upper address bounds.
 */
vm_map_t
vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
{
        vm_map_t result;

        result = zalloc(mapzone);
        vm_map_init(result, min, max);
        result->pmap = pmap;
        return (result);
}

/*
 * Initialize an existing vm_map structure
 * such as that in the vmspace structure.
 * The pmap is set elsewhere.
 */
void
vm_map_init(struct vm_map *map, vm_offset_t min, vm_offset_t max)
{
        map->header.next = map->header.prev = &map->header;
        RB_INIT(&map->rb_root);
        map->nentries = 0;
        map->size = 0;
        map->system_map = 0;
        map->infork = 0;
        map->min_offset = min;
        map->max_offset = max;
        map->first_free = &map->header;
        map->hint = &map->header;
        map->timestamp = 0;
        lockinit(&map->lock, "thrd_sleep", 0, 0);
}

/*
 * Shadow the vm_map_entry's object.  This typically needs to be done when
 * a write fault is taken on an entry which had previously been cloned by
 * fork().  The shared object (which might be NULL) must become private so
 * we add a shadow layer above it.
 *
 * Object allocation for anonymous mappings is deferred as long as possible.
 * When creating a shadow, however, the underlying object must be instantiated
 * so it can be shared.
 *
 * If the map segment is governed by a virtual page table then it is
 * possible to address offsets beyond the mapped area.  Just allocate
 * a maximally sized object for this case.
 */
static
void
vm_map_entry_shadow(vm_map_entry_t entry)
{
        if (entry->maptype == VM_MAPTYPE_VPAGETABLE) {
                vm_object_shadow(&entry->object.vm_object, &entry->offset,
                                 0x7FFFFFFF);   /* XXX */
        } else {
                vm_object_shadow(&entry->object.vm_object, &entry->offset,
                                 atop(entry->end - entry->start));
        }
        entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
}

/*
 * Allocate an object for a vm_map_entry.
 *
 * Object allocation for anonymous mappings is deferred as long as possible.
 * This function is called when we can defer no longer, generally when a map
 * entry might be split or forked or takes a page fault.
 *
 * If the map segment is governed by a virtual page table then it is
 * possible to address offsets beyond the mapped area.  Just allocate
 * a maximally sized object for this case.
 */
void
vm_map_entry_allocate_object(vm_map_entry_t entry)
{
        vm_object_t obj;

        if (entry->maptype == VM_MAPTYPE_VPAGETABLE) {
                obj = vm_object_allocate(OBJT_DEFAULT, 0x7FFFFFFF); /* XXX */
        } else {
                obj = vm_object_allocate(OBJT_DEFAULT,
                                         atop(entry->end - entry->start));
        }
        entry->object.vm_object = obj;
        entry->offset = 0;
}

/*
 *      vm_map_entry_reserve_cpu_init:
 *
 *      Set an initial negative count so the first attempt to reserve
 *      space preloads a bunch of vm_map_entry's for this cpu.  Also
 *      pre-allocate 2 vm_map_entries which will be needed by zalloc() to
 *      map a new page for vm_map_entry structures.  SMP systems are
 *      particularly sensitive.
 *
 *      This routine is called in early boot so we cannot just call
 *      vm_map_entry_reserve().
 *
 *      May be called for a gd other than mycpu, but may only be called
 *      during early boot.
 */
void
vm_map_entry_reserve_cpu_init(globaldata_t gd)
{
        vm_map_entry_t entry;
        int i;

        gd->gd_vme_avail -= MAP_RESERVE_COUNT * 2;
        entry = &cpu_map_entry_init[gd->gd_cpuid][0];
        for (i = 0; i < VMEPERCPU; ++i, ++entry) {
                entry->next = gd->gd_vme_base;
                gd->gd_vme_base = entry;
        }
}

/*
 *      vm_map_entry_reserve:
 *
 *      Reserves vm_map_entry structures so code later on can manipulate
 *      map_entry structures within a locked map without blocking trying
 *      to allocate a new vm_map_entry.
 */
int
vm_map_entry_reserve(int count)
{
        struct globaldata *gd = mycpu;
        vm_map_entry_t entry;

        crit_enter();

        /*
         * Make sure we have enough structures in gd_vme_base to handle
         * the reservation request.
         */
        while (gd->gd_vme_avail < count) {
                entry = zalloc(mapentzone);
                entry->next = gd->gd_vme_base;
                gd->gd_vme_base = entry;
                ++gd->gd_vme_avail;
        }
        gd->gd_vme_avail -= count;
        crit_exit();
        return(count);
}
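
/*
 * Illustrative sketch, not part of the original source: the typical
 * reservation bracket around locked-map operations, mirroring the
 * pattern used by vmspace_dofree() and vm_map_find() in this file.
 * The map and the work performed are hypothetical.
 */
#if 0
static void
example_reserve_bracket(vm_map_t map)
{
        int count;

        count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
        vm_map_lock(map);
        /* ... clip/insert/delete entries without blocking in zalloc ... */
        vm_map_unlock(map);
        vm_map_entry_release(count);
}
#endif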

/*
 *      vm_map_entry_release:
 *
 *      Releases previously reserved vm_map_entry structures that were not
 *      used.  If we have too much junk in our per-cpu cache clean some of
 *      it out.
 */
void
vm_map_entry_release(int count)
{
        struct globaldata *gd = mycpu;
        vm_map_entry_t entry;

        crit_enter();
        gd->gd_vme_avail += count;
        while (gd->gd_vme_avail > MAP_RESERVE_SLOP) {
                entry = gd->gd_vme_base;
                KKASSERT(entry != NULL);
                gd->gd_vme_base = entry->next;
                --gd->gd_vme_avail;
                crit_exit();
                zfree(mapentzone, entry);
                crit_enter();
        }
        crit_exit();
}

/*
 *      vm_map_entry_kreserve:
 *
 *      Reserve map entry structures for use in kernel_map itself.  These
 *      entries have *ALREADY* been reserved on a per-cpu basis when the map
 *      was inited.  This function is used by zalloc() to avoid a recursion
 *      when zalloc() itself needs to allocate additional kernel memory.
 *
 *      This function works like the normal reserve but does not load the
 *      vm_map_entry cache (because that would result in an infinite
 *      recursion).  Note that gd_vme_avail may go negative.  This is expected.
 *
 *      Any caller of this function must be sure to renormalize after
 *      potentially eating entries to ensure that the reserve supply
 *      remains intact.
 */
int
vm_map_entry_kreserve(int count)
{
        struct globaldata *gd = mycpu;

        crit_enter();
        gd->gd_vme_avail -= count;
        crit_exit();
        KASSERT(gd->gd_vme_base != NULL,
                ("no reserved entries left, gd_vme_avail = %d\n",
                 gd->gd_vme_avail));
        return(count);
}

/*
 *      vm_map_entry_krelease:
 *
 *      Release previously reserved map entries for kernel_map.  We do not
 *      attempt to clean up like the normal release function as this would
 *      cause an unnecessary (but probably not fatal) deep procedure call.
 */
void
vm_map_entry_krelease(int count)
{
        struct globaldata *gd = mycpu;

        crit_enter();
        gd->gd_vme_avail += count;
        crit_exit();
}

/*
 *      vm_map_entry_create:    [ internal use only ]
 *
 *      Allocates a VM map entry for insertion.  No entry fields are filled
 *      in.
 *
 *      This routine may be called from an interrupt thread but not a FAST
 *      interrupt.  This routine may recurse the map lock.
 */
static vm_map_entry_t
vm_map_entry_create(vm_map_t map, int *countp)
{
        struct globaldata *gd = mycpu;
        vm_map_entry_t entry;

        KKASSERT(*countp > 0);
        --*countp;
        crit_enter();
        entry = gd->gd_vme_base;
        KASSERT(entry != NULL, ("gd_vme_base NULL! count %d", *countp));
        gd->gd_vme_base = entry->next;
        crit_exit();
        return(entry);
}

/*
 *      vm_map_entry_dispose:   [ internal use only ]
 *
 *      Dispose of a vm_map_entry that is no longer being referenced.  This
 *      function may be called from an interrupt.
 */
static void
vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry, int *countp)
{
        struct globaldata *gd = mycpu;

        KKASSERT(map->hint != entry);
        KKASSERT(map->first_free != entry);

        ++*countp;
        crit_enter();
        entry->next = gd->gd_vme_base;
        gd->gd_vme_base = entry;
        crit_exit();
}


/*
 *      vm_map_entry_{un,}link:
 *
 *      Insert/remove entries from maps.
 */
static __inline void
vm_map_entry_link(vm_map_t map,
                  vm_map_entry_t after_where,
                  vm_map_entry_t entry)
{
        map->nentries++;
        entry->prev = after_where;
        entry->next = after_where->next;
        entry->next->prev = entry;
        after_where->next = entry;
        if (vm_map_rb_tree_RB_INSERT(&map->rb_root, entry))
                panic("vm_map_entry_link: dup addr map %p ent %p", map, entry);
}

static __inline void
vm_map_entry_unlink(vm_map_t map,
                    vm_map_entry_t entry)
{
        vm_map_entry_t prev;
        vm_map_entry_t next;

        if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
                panic("vm_map_entry_unlink: attempt to mess with "
                      "locked entry! %p", entry);
        }
        prev = entry->prev;
        next = entry->next;
        next->prev = prev;
        prev->next = next;
        vm_map_rb_tree_RB_REMOVE(&map->rb_root, entry);
        map->nentries--;
}

/*
 *      vm_map_lookup_entry:    [ internal use only ]
 *
 *      Finds the map entry containing (or
 *      immediately preceding) the specified address
 *      in the given map; the entry is returned
 *      in the "entry" parameter.  The boolean
 *      result indicates whether the address is
 *      actually contained in the map.
 */
boolean_t
vm_map_lookup_entry(vm_map_t map, vm_offset_t address,
    vm_map_entry_t *entry /* OUT */)
{
        vm_map_entry_t tmp;
        vm_map_entry_t last;

#if 0
        /*
         * XXX TEMPORARILY DISABLED.  For some reason our attempt to revive
         * the hint code with the red-black lookup meets with system crashes
         * and lockups.  We do not yet know why.
         *
         * It is possible that the problem is related to the setting
         * of the hint during map_entry deletion, in the code specified
         * at the GGG comment later on in this file.
         */
        /*
         * Quickly check the cached hint, there's a good chance of a match.
         */
        if (map->hint != &map->header) {
                tmp = map->hint;
                if (address >= tmp->start && address < tmp->end) {
                        *entry = tmp;
                        return(TRUE);
                }
        }
#endif

        /*
         * Locate the record from the top of the tree.  'last' tracks the
         * closest prior record and is returned if no match is found, which
         * in binary tree terms means tracking the most recent right-branch
         * taken.  If there is no prior record, &map->header is returned.
         */
        last = &map->header;
        tmp = RB_ROOT(&map->rb_root);

        while (tmp) {
                if (address >= tmp->start) {
                        if (address < tmp->end) {
                                *entry = tmp;
                                map->hint = tmp;
                                return(TRUE);
                        }
                        last = tmp;
                        tmp = RB_RIGHT(tmp, rb_entry);
                } else {
                        tmp = RB_LEFT(tmp, rb_entry);
                }
        }
        *entry = last;
        return (FALSE);
}
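
/*
 * Illustrative sketch, not part of the original source: how callers
 * interpret the two lookup outcomes.  On TRUE, *entry contains the
 * address.  On FALSE, *entry is the closest preceding entry (possibly
 * &map->header) and scanning typically resumes at entry->next.
 */
#if 0
static void
example_lookup(vm_map_t map, vm_offset_t addr)
{
        vm_map_entry_t entry;

        if (vm_map_lookup_entry(map, addr, &entry)) {
                /* addr lies within [entry->start, entry->end) */
        } else {
                /* addr falls in a hole; next entry, if any, is entry->next */
        }
}
#endif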

/*
 *      vm_map_insert:
 *
 *      Inserts the given whole VM object into the target
 *      map at the specified address range.  The object's
 *      size should match that of the address range.
 *
 *      Requires that the map be locked, and leaves it so.  Requires that
 *      sufficient vm_map_entry structures have been reserved and tracks
 *      the use via countp.
 *
 *      If object is non-NULL, ref count must be bumped by caller
 *      prior to making call to account for the new entry.
 */
int
vm_map_insert(vm_map_t map, int *countp,
              vm_object_t object, vm_ooffset_t offset,
              vm_offset_t start, vm_offset_t end,
              vm_maptype_t maptype,
              vm_prot_t prot, vm_prot_t max,
              int cow)
{
        vm_map_entry_t new_entry;
        vm_map_entry_t prev_entry;
        vm_map_entry_t temp_entry;
        vm_eflags_t protoeflags;

        /*
         * Check that the start and end points are not bogus.
         */
        if ((start < map->min_offset) || (end > map->max_offset) ||
            (start >= end))
                return (KERN_INVALID_ADDRESS);

        /*
         * Find the entry prior to the proposed starting address; if it's part
         * of an existing entry, this range is bogus.
         */
        if (vm_map_lookup_entry(map, start, &temp_entry))
                return (KERN_NO_SPACE);

        prev_entry = temp_entry;

        /*
         * Assert that the next entry doesn't overlap the end point.
         */
        if ((prev_entry->next != &map->header) &&
            (prev_entry->next->start < end))
                return (KERN_NO_SPACE);

        protoeflags = 0;

        if (cow & MAP_COPY_ON_WRITE)
                protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY;

        if (cow & MAP_NOFAULT) {
                protoeflags |= MAP_ENTRY_NOFAULT;

                KASSERT(object == NULL,
                        ("vm_map_insert: paradoxical MAP_NOFAULT request"));
        }
        if (cow & MAP_DISABLE_SYNCER)
                protoeflags |= MAP_ENTRY_NOSYNC;
        if (cow & MAP_DISABLE_COREDUMP)
                protoeflags |= MAP_ENTRY_NOCOREDUMP;

        if (object) {
                /*
                 * When object is non-NULL, it could be shared with another
                 * process.  We have to set or clear OBJ_ONEMAPPING
                 * appropriately.
                 */
                if ((object->ref_count > 1) || (object->shadow_count != 0)) {
                        vm_object_clear_flag(object, OBJ_ONEMAPPING);
                }
        }
        else if ((prev_entry != &map->header) &&
                 (prev_entry->eflags == protoeflags) &&
                 (prev_entry->end == start) &&
                 (prev_entry->wired_count == 0) &&
                 prev_entry->maptype == maptype &&
                 ((prev_entry->object.vm_object == NULL) ||
                  vm_object_coalesce(prev_entry->object.vm_object,
                                     OFF_TO_IDX(prev_entry->offset),
                                     (vm_size_t)(prev_entry->end - prev_entry->start),
                                     (vm_size_t)(end - prev_entry->end)))) {
                /*
                 * We were able to extend the object.  Determine if we
                 * can extend the previous map entry to include the
                 * new range as well.
                 */
                if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
                    (prev_entry->protection == prot) &&
                    (prev_entry->max_protection == max)) {
                        map->size += (end - prev_entry->end);
                        prev_entry->end = end;
                        vm_map_simplify_entry(map, prev_entry, countp);
                        return (KERN_SUCCESS);
                }

                /*
                 * If we can extend the object but cannot extend the
                 * map entry, we have to create a new map entry.  We
                 * must bump the ref count on the extended object to
                 * account for it.  object may be NULL.
                 */
                object = prev_entry->object.vm_object;
                offset = prev_entry->offset +
                        (prev_entry->end - prev_entry->start);
                vm_object_reference(object);
        }

        /*
         * NOTE: if conditionals fail, object can be NULL here.  This occurs
         * in things like the buffer map where we manage kva but do not manage
         * backing objects.
         */

        /*
         * Create a new entry
         */
        new_entry = vm_map_entry_create(map, countp);
        new_entry->start = start;
        new_entry->end = end;

        new_entry->maptype = maptype;
        new_entry->eflags = protoeflags;
        new_entry->object.vm_object = object;
        new_entry->offset = offset;
        new_entry->aux.master_pde = 0;

        new_entry->inheritance = VM_INHERIT_DEFAULT;
        new_entry->protection = prot;
        new_entry->max_protection = max;
        new_entry->wired_count = 0;

        /*
         * Insert the new entry into the list
         */
        vm_map_entry_link(map, prev_entry, new_entry);
        map->size += new_entry->end - new_entry->start;

        /*
         * Update the free space hint
         */
        if ((map->first_free == prev_entry) &&
            (prev_entry->end >= new_entry->start)) {
                map->first_free = new_entry;
        }

#if 0
        /*
         * Temporarily removed to avoid MAP_STACK panic, due to
         * MAP_STACK being a huge hack.  Will be added back in
         * when MAP_STACK (and the user stack mapping) is fixed.
         */
        /*
         * It may be possible to simplify the entry
         */
        vm_map_simplify_entry(map, new_entry, countp);
#endif

        /*
         * Try to pre-populate the page table.  Mappings governed by virtual
         * page tables cannot be prepopulated without a lot of work, so
         * don't try.
         */
        if ((cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) &&
            maptype != VM_MAPTYPE_VPAGETABLE) {
                pmap_object_init_pt(map->pmap, start, prot,
                                    object, OFF_TO_IDX(offset), end - start,
                                    cow & MAP_PREFAULT_PARTIAL);
        }

        return (KERN_SUCCESS);
}

/*
 * Find sufficient space for `length' bytes in the given map, starting at
 * `start'.  The map must be locked.  Returns 0 on success, 1 on no space.
 *
 * This function will return an arbitrarily aligned pointer.  If no
 * particular alignment is required you should pass align as 1.  Note that
 * the map may return PAGE_SIZE aligned pointers if all the lengths used in
 * the map are a multiple of PAGE_SIZE, even if you pass a smaller align
 * argument.
 *
 * 'align' should be a power of 2 but is not required to be.
 */
int
vm_map_findspace(
        vm_map_t map,
        vm_offset_t start,
        vm_size_t length,
        vm_offset_t align,
        vm_offset_t *addr)
{
        vm_map_entry_t entry, next;
        vm_offset_t end;
        vm_offset_t align_mask;

        if (start < map->min_offset)
                start = map->min_offset;
        if (start > map->max_offset)
                return (1);

        /*
         * If the alignment is not a power of 2 we will have to use
         * a mod/division, set align_mask to a special value.
         */
        if ((align | (align - 1)) + 1 != (align << 1))
                align_mask = (vm_offset_t)-1;
        else
                align_mask = align - 1;

retry:
        /*
         * Look for the first possible address; if there's already something
         * at this address, we have to start after it.
         */
        if (start == map->min_offset) {
                if ((entry = map->first_free) != &map->header)
                        start = entry->end;
        } else {
                vm_map_entry_t tmp;

                if (vm_map_lookup_entry(map, start, &tmp))
                        start = tmp->end;
                entry = tmp;
        }

        /*
         * Look through the rest of the map, trying to fit a new region in the
         * gap between existing regions, or after the very last region.
         */
        for (;; start = (entry = next)->end) {
                /*
                 * Adjust the proposed start by the requested alignment,
                 * be sure that we didn't wrap the address.
                 */
                if (align_mask == (vm_offset_t)-1)
                        end = ((start + align - 1) / align) * align;
                else
                        end = (start + align_mask) & ~align_mask;
                if (end < start)
                        return (1);
                start = end;
                /*
                 * Find the end of the proposed new region.  Be sure we didn't
                 * go beyond the end of the map, or wrap around the address.
                 * Then check to see if this is the last entry or if the
                 * proposed end fits in the gap between this and the next
                 * entry.
                 */
                end = start + length;
                if (end > map->max_offset || end < start)
                        return (1);
                next = entry->next;
                if (next == &map->header || next->start >= end)
                        break;
        }
        map->hint = entry;
        if (map == kernel_map) {
                vm_offset_t ksize;
                if ((ksize = round_page(start + length)) > kernel_vm_end) {
                        pmap_growkernel(ksize);
                        goto retry;
                }
        }
        *addr = start;
        return (0);
}
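
/*
 * Worked example of the power-of-2 test above (illustrative numbers):
 * for align = 8, (8 | 7) + 1 == 16 == (8 << 1), so align_mask becomes
 * 7 and rounding is the cheap (start + 7) & ~7.  For align = 12,
 * (12 | 11) + 1 == 16 != 24, so align_mask is set to -1 and the
 * mod/division path is taken instead.
 */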

/*
 *      vm_map_find finds an unallocated region in the target address
 *      map with the given length.  The search is defined to be
 *      first-fit from the specified address; the region found is
 *      returned in the same parameter.
 *
 *      If object is non-NULL, ref count must be bumped by caller
 *      prior to making call to account for the new entry.
 */
int
vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
            vm_offset_t *addr,  vm_size_t length,
            boolean_t find_space,
            vm_maptype_t maptype,
            vm_prot_t prot, vm_prot_t max,
            int cow)
{
        vm_offset_t start;
        int result;
        int count;

        start = *addr;

        count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
        vm_map_lock(map);
        if (find_space) {
                if (vm_map_findspace(map, start, length, 1, addr)) {
                        vm_map_unlock(map);
                        vm_map_entry_release(count);
                        return (KERN_NO_SPACE);
                }
                start = *addr;
        }
        result = vm_map_insert(map, &count, object, offset,
                               start, start + length,
                               maptype,
                               prot, max,
                               cow);
        vm_map_unlock(map);
        vm_map_entry_release(count);

        return (result);
}
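
/*
 * Illustrative sketch, not part of the original source: allocating an
 * anonymous region with vm_map_find().  A NULL object means the
 * backing object is allocated lazily; per the comment above, a
 * non-NULL object would need its ref count bumped first.  The helper
 * name and target map are hypothetical.
 */
#if 0
static int
example_find_anon(vm_map_t map, vm_offset_t *addrp, vm_size_t size)
{
        return (vm_map_find(map, NULL, 0, addrp, size,
                            TRUE,               /* find_space */
                            VM_MAPTYPE_NORMAL,
                            VM_PROT_ALL, VM_PROT_ALL,
                            0));
}
#endif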

/*
 *      vm_map_simplify_entry:
 *
 *      Simplify the given map entry by merging with either neighbor.  This
 *      routine also has the ability to merge with both neighbors.
 *
 *      The map must be locked.
 *
 *      This routine guarantees that the passed entry remains valid (though
 *      possibly extended).  When merging, this routine may delete one or
 *      both neighbors.  No action is taken on entries which have their
 *      in-transition flag set.
 */
void
vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry, int *countp)
{
        vm_map_entry_t next, prev;
        vm_size_t prevsize, esize;

        if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
                ++mycpu->gd_cnt.v_intrans_coll;
                return;
        }

        if (entry->maptype == VM_MAPTYPE_SUBMAP)
                return;

        prev = entry->prev;
        if (prev != &map->header) {
                prevsize = prev->end - prev->start;
                if ( (prev->end == entry->start) &&
                     (prev->maptype == entry->maptype) &&
                     (prev->object.vm_object == entry->object.vm_object) &&
                     (!prev->object.vm_object ||
                        (prev->offset + prevsize == entry->offset)) &&
                     (prev->eflags == entry->eflags) &&
                     (prev->protection == entry->protection) &&
                     (prev->max_protection == entry->max_protection) &&
                     (prev->inheritance == entry->inheritance) &&
                     (prev->wired_count == entry->wired_count)) {
                        if (map->first_free == prev)
                                map->first_free = entry;
                        if (map->hint == prev)
                                map->hint = entry;
                        vm_map_entry_unlink(map, prev);
                        entry->start = prev->start;
                        entry->offset = prev->offset;
                        if (prev->object.vm_object)
                                vm_object_deallocate(prev->object.vm_object);
                        vm_map_entry_dispose(map, prev, countp);
                }
        }

        next = entry->next;
        if (next != &map->header) {
                esize = entry->end - entry->start;
                if ((entry->end == next->start) &&
                    (next->maptype == entry->maptype) &&
                    (next->object.vm_object == entry->object.vm_object) &&
                     (!entry->object.vm_object ||
                        (entry->offset + esize == next->offset)) &&
                    (next->eflags == entry->eflags) &&
                    (next->protection == entry->protection) &&
                    (next->max_protection == entry->max_protection) &&
                    (next->inheritance == entry->inheritance) &&
                    (next->wired_count == entry->wired_count)) {
                        if (map->first_free == next)
                                map->first_free = entry;
                        if (map->hint == next)
                                map->hint = entry;
                        vm_map_entry_unlink(map, next);
                        entry->end = next->end;
                        if (next->object.vm_object)
                                vm_object_deallocate(next->object.vm_object);
                        vm_map_entry_dispose(map, next, countp);
                }
        }
}

/*
 *      vm_map_clip_start:      [ internal use only ]
 *
 *      Asserts that the given entry begins at or after
 *      the specified address; if necessary,
 *      it splits the entry into two.
 */
#define vm_map_clip_start(map, entry, startaddr, countp) \
{ \
        if (startaddr > entry->start) \
                _vm_map_clip_start(map, entry, startaddr, countp); \
}

/*
 *      This routine is called only when it is known that
 *      the entry must be split.
 */
static void
_vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start,
        int *countp)
{
        vm_map_entry_t new_entry;

        /*
         * Split off the front portion -- note that we must insert the new
         * entry BEFORE this one, so that this entry has the specified
         * starting address.
         */
        vm_map_simplify_entry(map, entry, countp);

        /*
         * If there is no object backing this entry, we might as well create
         * one now.  If we defer it, an object can get created after the map
         * is clipped, and individual objects will be created for the split-up
         * map.  This is a bit of a hack, but is also about the best place to
         * put this improvement.
         */
        if (entry->object.vm_object == NULL && !map->system_map) {
                vm_map_entry_allocate_object(entry);
        }

        new_entry = vm_map_entry_create(map, countp);
        *new_entry = *entry;

        new_entry->end = start;
        entry->offset += (start - entry->start);
        entry->start = start;

        vm_map_entry_link(map, entry->prev, new_entry);

        switch(entry->maptype) {
        case VM_MAPTYPE_NORMAL:
        case VM_MAPTYPE_VPAGETABLE:
                vm_object_reference(new_entry->object.vm_object);
                break;
        default:
                break;
        }
}

/*
 *      vm_map_clip_end:        [ internal use only ]
 *
 *      Asserts that the given entry ends at or before
 *      the specified address; if necessary,
 *      it splits the entry into two.
 */
#define vm_map_clip_end(map, entry, endaddr, countp) \
{ \
        if (endaddr < entry->end) \
                _vm_map_clip_end(map, entry, endaddr, countp); \
}

/*
 *      This routine is called only when it is known that
 *      the entry must be split.
 */
static void
_vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end,
        int *countp)
{
        vm_map_entry_t new_entry;

        /*
         * If there is no object backing this entry, we might as well create
         * one now.  If we defer it, an object can get created after the map
         * is clipped, and individual objects will be created for the split-up
         * map.  This is a bit of a hack, but is also about the best place to
         * put this improvement.
         */
        if (entry->object.vm_object == NULL && !map->system_map) {
                vm_map_entry_allocate_object(entry);
        }

        /*
         * Create a new entry and insert it AFTER the specified entry
         */
        new_entry = vm_map_entry_create(map, countp);
        *new_entry = *entry;

        new_entry->start = entry->end = end;
        new_entry->offset += (end - entry->start);

        vm_map_entry_link(map, entry, new_entry);

        switch(entry->maptype) {
        case VM_MAPTYPE_NORMAL:
        case VM_MAPTYPE_VPAGETABLE:
                vm_object_reference(new_entry->object.vm_object);
                break;
        default:
                break;
        }
}

/*
 *      VM_MAP_RANGE_CHECK:     [ internal use only ]
 *
 *      Asserts that the starting and ending region
 *      addresses fall within the valid range of the map.
 */
#define VM_MAP_RANGE_CHECK(map, start, end)             \
                {                                       \
                if (start < vm_map_min(map))            \
                        start = vm_map_min(map);        \
                if (end > vm_map_max(map))              \
                        end = vm_map_max(map);          \
                if (start > end)                        \
                        start = end;                    \
                }

/*
 *      vm_map_transition_wait: [ kernel use only ]
 *
 *      Used to block when an in-transition collision occurs.  The map
 *      is unlocked for the sleep and relocked before the return.
 */
static
void
vm_map_transition_wait(vm_map_t map)
{
        vm_map_unlock(map);
        tsleep(map, 0, "vment", 0);
        vm_map_lock(map);
}

/*
 * CLIP_CHECK_BACK
 * CLIP_CHECK_FWD
 *
 *      When we do blocking operations with the map lock held it is
 *      possible that a clip might have occurred on our in-transit entry,
 *      requiring an adjustment to the entry in our loop.  These macros
 *      help the pageable and clip_range code deal with the case.  The
 *      conditional costs virtually nothing if no clipping has occurred.
 */

#define CLIP_CHECK_BACK(entry, save_start)              \
    do {                                                \
            while (entry->start != save_start) {        \
                    entry = entry->prev;                \
                    KASSERT(entry != &map->header, ("bad entry clip")); \
            }                                           \
    } while(0)

#define CLIP_CHECK_FWD(entry, save_end)                 \
    do {                                                \
            while (entry->end != save_end) {            \
                    entry = entry->next;                \
                    KASSERT(entry != &map->header, ("bad entry clip")); \
            }                                           \
    } while(0)


/*
 *      vm_map_clip_range:      [ kernel use only ]
 *
 *      Clip the specified range and return the base entry.  The
 *      range may cover several entries starting at the returned base
 *      and the first and last entry in the covering sequence will be
 *      properly clipped to the requested start and end address.
 *
 *      If no holes are allowed you should pass the MAP_CLIP_NO_HOLES
 *      flag.
 *
 *      The MAP_ENTRY_IN_TRANSITION flag will be set for the entries
 *      covered by the requested range.
 *
 *      The map must be exclusively locked on entry and will remain locked
 *      on return.  If no range exists, or the range contains holes and you
 *      specified that no holes were allowed, NULL will be returned.  This
 *      routine may temporarily unlock the map in order to avoid a deadlock
 *      when sleeping.
 */
static
vm_map_entry_t
vm_map_clip_range(vm_map_t map, vm_offset_t start, vm_offset_t end,
        int *countp, int flags)
{
        vm_map_entry_t start_entry;
        vm_map_entry_t entry;

        /*
         * Locate the entry and effect initial clipping.  The in-transition
         * case does not occur very often so do not try to optimize it.
         */
again:
        if (vm_map_lookup_entry(map, start, &start_entry) == FALSE)
                return (NULL);
        entry = start_entry;
        if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
                entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
                ++mycpu->gd_cnt.v_intrans_coll;
                ++mycpu->gd_cnt.v_intrans_wait;
                vm_map_transition_wait(map);
                /*
                 * entry and/or start_entry may have been clipped while
                 * we slept, or may have gone away entirely.  We have
                 * to restart from the lookup.
                 */
                goto again;
        }
        /*
         * Since we hold an exclusive map lock we do not have to restart
         * after clipping, even though clipping may block in zalloc.
         */
        vm_map_clip_start(map, entry, start, countp);
        vm_map_clip_end(map, entry, end, countp);
        entry->eflags |= MAP_ENTRY_IN_TRANSITION;

        /*
         * Scan entries covered by the range.  When working on the next
         * entry a restart need only re-loop on the current entry which
         * we have already locked, since 'next' may have changed.  Also,
         * even though entry is safe, it may have been clipped so we
         * have to iterate forwards through the clip after sleeping.
         */
        while (entry->next != &map->header && entry->next->start < end) {
                vm_map_entry_t next = entry->next;

                if (flags & MAP_CLIP_NO_HOLES) {
                        if (next->start > entry->end) {
                                vm_map_unclip_range(map, start_entry,
                                        start, entry->end, countp, flags);
                                return(NULL);
                        }
                }

                if (next->eflags & MAP_ENTRY_IN_TRANSITION) {
                        vm_offset_t save_end = entry->end;
                        next->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
                        ++mycpu->gd_cnt.v_intrans_coll;
                        ++mycpu->gd_cnt.v_intrans_wait;
                        vm_map_transition_wait(map);

                        /*
                         * clips might have occurred while we blocked.
                         */
                        CLIP_CHECK_FWD(entry, save_end);
                        CLIP_CHECK_BACK(start_entry, start);
                        continue;
                }
                /*
                 * No restart necessary even though clip_end may block, we
                 * are holding the map lock.
                 */
                vm_map_clip_end(map, next, end, countp);
                next->eflags |= MAP_ENTRY_IN_TRANSITION;
                entry = next;
        }
        if (flags & MAP_CLIP_NO_HOLES) {
                if (entry->end != end) {
                        vm_map_unclip_range(map, start_entry,
                                start, entry->end, countp, flags);
                        return(NULL);
                }
        }
        return(start_entry);
}
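
/*
 * Illustrative sketch, not part of the original source: the
 * clip/unclip bracket.  The same start/end/flags are passed to both
 * calls, and every entry in between carries MAP_ENTRY_IN_TRANSITION
 * while the caller works on it.  The helper name is hypothetical.
 */
#if 0
static void
example_clip_bracket(vm_map_t map, vm_offset_t start, vm_offset_t end,
        int *countp)
{
        vm_map_entry_t base;

        base = vm_map_clip_range(map, start, end, countp,
                                 MAP_CLIP_NO_HOLES);
        if (base == NULL)
                return;         /* range missing or contains a hole */
        /* ... operate on the in-transition entries from 'base' ... */
        vm_map_unclip_range(map, base, start, end, countp,
                            MAP_CLIP_NO_HOLES);
}
#endif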

/*
 *      vm_map_unclip_range:    [ kernel use only ]
 *
 *      Undo the effect of vm_map_clip_range().  You should pass the same
 *      flags and the same range that you passed to vm_map_clip_range().
 *      This code will clear the in-transition flag on the entries and
 *      wake up anyone waiting.  This code will also simplify the sequence
 *      and attempt to merge it with entries before and after the sequence.
 *
 *      The map must be locked on entry and will remain locked on return.
 *
 *      Note that you should also pass the start_entry returned by
 *      vm_map_clip_range().  However, if you block between the two calls
 *      with the map unlocked please be aware that the start_entry may
 *      have been clipped and you may need to scan it backwards to find
 *      the entry corresponding with the original start address.  You are
 *      responsible for this, vm_map_unclip_range() expects the correct
 *      start_entry to be passed to it and will KASSERT otherwise.
 */
static
void
vm_map_unclip_range(
        vm_map_t map,
        vm_map_entry_t start_entry,
        vm_offset_t start,
        vm_offset_t end,
        int *countp,
        int flags)
{
        vm_map_entry_t entry;

        entry = start_entry;

        KASSERT(entry->start == start, ("unclip_range: illegal base entry"));
        while (entry != &map->header && entry->start < end) {
                KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION,
                        ("in-transition flag not set during unclip on: %p",
                         entry));
                KASSERT(entry->end <= end,
                        ("unclip_range: tail wasn't clipped"));
                entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
                if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
                        entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
                        wakeup(map);
                }
                entry = entry->next;
        }

        /*
         * Simplification does not block so there is no restart case.
         */
        entry = start_entry;
        while (entry != &map->header && entry->start < end) {
                vm_map_simplify_entry(map, entry, countp);
                entry = entry->next;
        }
}

/*
 *      vm_map_submap:          [ kernel use only ]
 *
 *      Mark the given range as handled by a subordinate map.
 *
 *      This range must have been created with vm_map_find,
 *      and no other operations may have been performed on this
 *      range prior to calling vm_map_submap.
 *
 *      Only a limited number of operations can be performed
 *      within this range after calling vm_map_submap:
 *              vm_fault
 *      [Don't try vm_map_copy!]
 *
 *      To remove a submapping, one must first remove the
 *      range from the superior map, and then destroy the
 *      submap (if desired).  [Better yet, don't try it.]
 */
1450 int
1451 vm_map_submap(vm_map_t map, vm_offset_t start, vm_offset_t end, vm_map_t submap)
1452 {
1453         vm_map_entry_t entry;
1454         int result = KERN_INVALID_ARGUMENT;
1455         int count;
1456
1457         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1458         vm_map_lock(map);
1459
1460         VM_MAP_RANGE_CHECK(map, start, end);
1461
1462         if (vm_map_lookup_entry(map, start, &entry)) {
1463                 vm_map_clip_start(map, entry, start, &count);
1464         } else {
1465                 entry = entry->next;
1466         }
1467
1468         vm_map_clip_end(map, entry, end, &count);
1469
1470         if ((entry->start == start) && (entry->end == end) &&
1471             ((entry->eflags & MAP_ENTRY_COW) == 0) &&
1472             (entry->object.vm_object == NULL)) {
1473                 entry->object.sub_map = submap;
1474                 entry->maptype = VM_MAPTYPE_SUBMAP;
1475                 result = KERN_SUCCESS;
1476         }
1477         vm_map_unlock(map);
1478         vm_map_entry_release(count);
1479
1480         return (result);
1481 }
1482
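/*
 * Example (illustrative sketch only; "parent_map", "child_map", "addr"
 * and "size" are hypothetical).  The range must previously have been
 * created with vm_map_find() and otherwise left untouched:
 *
 *      result = vm_map_submap(parent_map, addr, addr + size, child_map);
 *      if (result != KERN_SUCCESS)
 *              panic("vm_map_submap failed");
 */
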
1483 /*
1484  * vm_map_protect:
1485  *
1486  * Sets the protection of the specified address region in the target map. 
1487  * If "set_max" is specified, the maximum protection is to be set;
1488  * otherwise, only the current protection is affected.
1489  *
1490  * The protection is not applicable to submaps, but is applicable to normal
1491  * maps and maps governed by virtual page tables.  For example, when operating
1492  * on a virtual page table the protection controls how COW occurs on the
1493  * backing object, whereas the virtual page table itself is an abstraction
1494  * managed by userland.
1495  */
1496 int
1497 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
1498                vm_prot_t new_prot, boolean_t set_max)
1499 {
1500         vm_map_entry_t current;
1501         vm_map_entry_t entry;
1502         int count;
1503
1504         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1505         vm_map_lock(map);
1506
1507         VM_MAP_RANGE_CHECK(map, start, end);
1508
1509         if (vm_map_lookup_entry(map, start, &entry)) {
1510                 vm_map_clip_start(map, entry, start, &count);
1511         } else {
1512                 entry = entry->next;
1513         }
1514
1515         /*
1516          * Make a first pass to check for protection violations.
1517          */
1518         current = entry;
1519         while ((current != &map->header) && (current->start < end)) {
1520                 if (current->maptype == VM_MAPTYPE_SUBMAP) {
1521                         vm_map_unlock(map);
1522                         vm_map_entry_release(count);
1523                         return (KERN_INVALID_ARGUMENT);
1524                 }
1525                 if ((new_prot & current->max_protection) != new_prot) {
1526                         vm_map_unlock(map);
1527                         vm_map_entry_release(count);
1528                         return (KERN_PROTECTION_FAILURE);
1529                 }
1530                 current = current->next;
1531         }
1532
1533         /*
1534          * Go back and fix up protections. [Note that clipping is not
1535          * necessary the second time.]
1536          */
1537         current = entry;
1538
1539         while ((current != &map->header) && (current->start < end)) {
1540                 vm_prot_t old_prot;
1541
1542                 vm_map_clip_end(map, current, end, &count);
1543
1544                 old_prot = current->protection;
1545                 if (set_max) {
1546                         current->protection =
1547                             (current->max_protection = new_prot) &
1548                             old_prot;
1549                 } else {
1550                         current->protection = new_prot;
1551                 }
1552
1553                 /*
1554                  * Update physical map if necessary. Worry about copy-on-write
1555                  * here -- CHECK THIS XXX
1556                  */
1557
1558                 if (current->protection != old_prot) {
1559 #define MASK(entry)     (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
1560                                                         VM_PROT_ALL)
1561
1562                         pmap_protect(map->pmap, current->start,
1563                             current->end,
1564                             current->protection & MASK(current));
1565 #undef  MASK
1566                 }
1567
1568                 vm_map_simplify_entry(map, current, &count);
1569
1570                 current = current->next;
1571         }
1572
1573         vm_map_unlock(map);
1574         vm_map_entry_release(count);
1575         return (KERN_SUCCESS);
1576 }
1577
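/*
 * Example (illustrative sketch only; "map", "start" and "end" are
 * hypothetical).  The first call changes only the current protection;
 * the second replaces the maximum protection and clamps the current
 * protection against it:
 *
 *      vm_map_protect(map, start, end, VM_PROT_READ, FALSE);
 *      vm_map_protect(map, start, end, VM_PROT_READ, TRUE);
 */
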
1578 /*
1579  *      vm_map_madvise:
1580  *
1581  *      This routine traverses a process's map handling the madvise
1582  *      system call.  Advisories are classified as either those affecting
1583  *      the vm_map_entry structure, or those affecting the underlying
1584  *      objects.
1585  *
1586  *      The <value> argument is used for extended madvise calls.
1587  */
1588 int
1589 vm_map_madvise(vm_map_t map, vm_offset_t start, vm_offset_t end,
1590                int behav, off_t value)
1591 {
1592         vm_map_entry_t current, entry;
1593         int modify_map = 0;
1594         int error = 0;
1595         int count;
1596
1597         /*
1598          * Some madvise calls directly modify the vm_map_entry, in which case
1599          * we need to use an exclusive lock on the map and we need to perform 
1600          * various clipping operations.  Otherwise we only need a read-lock
1601          * on the map.
1602          */
1603
1604         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1605
1606         switch(behav) {
1607         case MADV_NORMAL:
1608         case MADV_SEQUENTIAL:
1609         case MADV_RANDOM:
1610         case MADV_NOSYNC:
1611         case MADV_AUTOSYNC:
1612         case MADV_NOCORE:
1613         case MADV_CORE:
1614         case MADV_SETMAP:
1615         case MADV_INVAL:
1616                 modify_map = 1;
1617                 vm_map_lock(map);
1618                 break;
1619         case MADV_WILLNEED:
1620         case MADV_DONTNEED:
1621         case MADV_FREE:
1622                 vm_map_lock_read(map);
1623                 break;
1624         default:
1625                 vm_map_entry_release(count);
1626                 return (EINVAL);
1627         }
1628
1629         /*
1630          * Locate starting entry and clip if necessary.
1631          */
1632
1633         VM_MAP_RANGE_CHECK(map, start, end);
1634
1635         if (vm_map_lookup_entry(map, start, &entry)) {
1636                 if (modify_map)
1637                         vm_map_clip_start(map, entry, start, &count);
1638         } else {
1639                 entry = entry->next;
1640         }
1641
1642         if (modify_map) {
1643                 /*
1644                  * madvise behaviors that are implemented in the vm_map_entry.
1645                  *
1646                  * We clip the vm_map_entry so that behavioral changes are
1647                  * limited to the specified address range.
1648                  */
1649                 for (current = entry;
1650                      (current != &map->header) && (current->start < end);
1651                      current = current->next
1652                 ) {
1653                         if (current->maptype == VM_MAPTYPE_SUBMAP)
1654                                 continue;
1655
1656                         vm_map_clip_end(map, current, end, &count);
1657
1658                         switch (behav) {
1659                         case MADV_NORMAL:
1660                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
1661                                 break;
1662                         case MADV_SEQUENTIAL:
1663                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
1664                                 break;
1665                         case MADV_RANDOM:
1666                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
1667                                 break;
1668                         case MADV_NOSYNC:
1669                                 current->eflags |= MAP_ENTRY_NOSYNC;
1670                                 break;
1671                         case MADV_AUTOSYNC:
1672                                 current->eflags &= ~MAP_ENTRY_NOSYNC;
1673                                 break;
1674                         case MADV_NOCORE:
1675                                 current->eflags |= MAP_ENTRY_NOCOREDUMP;
1676                                 break;
1677                         case MADV_CORE:
1678                                 current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
1679                                 break;
1680                         case MADV_INVAL:
1681                                 /*
1682                                  * Invalidate the related pmap entries, used
1683                                  * to flush portions of the real kernel's
1684                                  * pmap when the caller has removed or
1685                                  * modified existing mappings in a virtual
1686                                  * page table.
1687                                  */
1688                                 pmap_remove(map->pmap,
1689                                             current->start, current->end);
1690                                 break;
1691                         case MADV_SETMAP:
1692                                 /*
1693                                  * Set the page directory page for a map
1694                                  * governed by a virtual page table.  Mark
1695                                  * the entry as being governed by a virtual
1696                                  * page table if it is not.
1697                                  *
1698                                  * XXX the page directory page is stored
1699                                  * in the aux.master_pde field of the map_entry.
1700                                  *
1701                                  * XXX the map simplification code does not
1702                                  * compare this field so weird things may
1703                                  * happen if you do not apply this function
1704                                  * to the entire mapping governed by the
1705                                  * virtual page table.
1706                                  */
1707                                 if (current->maptype != VM_MAPTYPE_VPAGETABLE) {
1708                                         error = EINVAL;
1709                                         break;
1710                                 }
1711                                 current->aux.master_pde = value;
1712                                 pmap_remove(map->pmap,
1713                                             current->start, current->end);
1714                                 break;
1715                         default:
1716                                 error = EINVAL;
1717                                 break;
1718                         }
1719                         vm_map_simplify_entry(map, current, &count);
1720                 }
1721                 vm_map_unlock(map);
1722         } else {
1723                 vm_pindex_t pindex;
1724                 int count;
1725
1726                 /*
1727                  * madvise behaviors that are implemented in the underlying
1728                  * vm_object.
1729                  *
1730                  * Since we don't clip the vm_map_entry, we have to clip
1731                  * the vm_object pindex and count.
1732                  *
1733                  * NOTE!  We currently do not support these functions on
1734                  * virtual page tables.
1735                  */
1736                 for (current = entry;
1737                      (current != &map->header) && (current->start < end);
1738                      current = current->next
1739                 ) {
1740                         vm_offset_t useStart;
1741
1742                         if (current->maptype != VM_MAPTYPE_NORMAL)
1743                                 continue;
1744
1745                         pindex = OFF_TO_IDX(current->offset);
1746                         count = atop(current->end - current->start);
1747                         useStart = current->start;
1748
1749                         if (current->start < start) {
1750                                 pindex += atop(start - current->start);
1751                                 count -= atop(start - current->start);
1752                                 useStart = start;
1753                         }
1754                         if (current->end > end)
1755                                 count -= atop(current->end - end);
1756
1757                         if (count <= 0)
1758                                 continue;
1759
1760                         vm_object_madvise(current->object.vm_object,
1761                                           pindex, count, behav);
1762
1763                         /*
1764                          * Try to populate the page table.  Mappings governed
1765                          * by virtual page tables cannot be pre-populated
1766                          * without a lot of work so don't try.
1767                          */
1768                         if (behav == MADV_WILLNEED &&
1769                             current->maptype != VM_MAPTYPE_VPAGETABLE) {
1770                                 pmap_object_init_pt(
1771                                     map->pmap, 
1772                                     useStart,
1773                                     current->protection,
1774                                     current->object.vm_object,
1775                                     pindex, 
1776                                     (count << PAGE_SHIFT),
1777                                     MAP_PREFAULT_MADVISE
1778                                 );
1779                         }
1780                 }
1781                 vm_map_unlock_read(map);
1782         }
1783         vm_map_entry_release(count);
1784         return (error);
1785 }
1786
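/*
 * Example (illustrative sketch only; "map", "start" and "end" are
 * hypothetical).  MADV_NOSYNC takes the exclusive-lock path and modifies
 * the map entries, while MADV_WILLNEED takes the read-lock path and
 * operates on the underlying objects:
 *
 *      vm_map_madvise(map, start, end, MADV_NOSYNC, 0);
 *      vm_map_madvise(map, start, end, MADV_WILLNEED, 0);
 */
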
1787
1788 /*
1789  *      vm_map_inherit:
1790  *
1791  *      Sets the inheritance of the specified address
1792  *      range in the target map.  Inheritance
1793  *      affects how the map will be shared with
1794  *      child maps at the time of vm_map_fork.
1795  */
1796 int
1797 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
1798                vm_inherit_t new_inheritance)
1799 {
1800         vm_map_entry_t entry;
1801         vm_map_entry_t temp_entry;
1802         int count;
1803
1804         switch (new_inheritance) {
1805         case VM_INHERIT_NONE:
1806         case VM_INHERIT_COPY:
1807         case VM_INHERIT_SHARE:
1808                 break;
1809         default:
1810                 return (KERN_INVALID_ARGUMENT);
1811         }
1812
1813         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1814         vm_map_lock(map);
1815
1816         VM_MAP_RANGE_CHECK(map, start, end);
1817
1818         if (vm_map_lookup_entry(map, start, &temp_entry)) {
1819                 entry = temp_entry;
1820                 vm_map_clip_start(map, entry, start, &count);
1821         } else
1822                 entry = temp_entry->next;
1823
1824         while ((entry != &map->header) && (entry->start < end)) {
1825                 vm_map_clip_end(map, entry, end, &count);
1826
1827                 entry->inheritance = new_inheritance;
1828
1829                 vm_map_simplify_entry(map, entry, &count);
1830
1831                 entry = entry->next;
1832         }
1833         vm_map_unlock(map);
1834         vm_map_entry_release(count);
1835         return (KERN_SUCCESS);
1836 }
1837
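/*
 * Example (illustrative sketch only; "map", "start" and "end" are
 * hypothetical).  After this call the range is shared with, rather than
 * copied into, any child created by vmspace_fork():
 *
 *      vm_map_inherit(map, start, end, VM_INHERIT_SHARE);
 */
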
1838 /*
1839  * Implement the semantics of mlock
1840  */
1841 int
1842 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t real_end,
1843     boolean_t new_pageable)
1844 {
1845         vm_map_entry_t entry;
1846         vm_map_entry_t start_entry;
1847         vm_offset_t end;
1848         int rv = KERN_SUCCESS;
1849         int count;
1850
1851         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1852         vm_map_lock(map);
1853         VM_MAP_RANGE_CHECK(map, start, real_end);
1854         end = real_end;
1855
1856         start_entry = vm_map_clip_range(map, start, end, &count, MAP_CLIP_NO_HOLES);
1857         if (start_entry == NULL) {
1858                 vm_map_unlock(map);
1859                 vm_map_entry_release(count);
1860                 return (KERN_INVALID_ADDRESS);
1861         }
1862
1863         if (new_pageable == 0) {
1864                 entry = start_entry;
1865                 while ((entry != &map->header) && (entry->start < end)) {
1866                         vm_offset_t save_start;
1867                         vm_offset_t save_end;
1868
1869                         /*
1870                          * Already user wired or hard wired (trivial cases)
1871                          */
1872                         if (entry->eflags & MAP_ENTRY_USER_WIRED) {
1873                                 entry = entry->next;
1874                                 continue;
1875                         }
1876                         if (entry->wired_count != 0) {
1877                                 entry->wired_count++;
1878                                 entry->eflags |= MAP_ENTRY_USER_WIRED;
1879                                 entry = entry->next;
1880                                 continue;
1881                         }
1882
1883                         /*
1884                          * A new wiring requires instantiation of appropriate
1885                          * management structures and the faulting in of the
1886                          * page.
1887                          */
1888                         if (entry->maptype != VM_MAPTYPE_SUBMAP) {
1889                                 int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
1890                                 if (copyflag && ((entry->protection & VM_PROT_WRITE) != 0)) {
1891                                         vm_map_entry_shadow(entry);
1892                                 } else if (entry->object.vm_object == NULL &&
1893                                            !map->system_map) {
1894                                         vm_map_entry_allocate_object(entry);
1895                                 }
1896                         }
1897                         entry->wired_count++;
1898                         entry->eflags |= MAP_ENTRY_USER_WIRED;
1899
1900                         /*
1901                          * Now fault in the area.  Note that vm_fault_wire()
1902                          * may release the map lock temporarily, it will be
1903                          * relocked on return.  The in-transition
1904                          * flag protects the entries. 
1905                          */
1906                         save_start = entry->start;
1907                         save_end = entry->end;
1908                         rv = vm_fault_wire(map, entry, TRUE);
1909                         if (rv) {
1910                                 CLIP_CHECK_BACK(entry, save_start);
1911                                 for (;;) {
1912                                         KASSERT(entry->wired_count == 1, ("bad wired_count on entry"));
1913                                         entry->eflags &= ~MAP_ENTRY_USER_WIRED;
1914                                         entry->wired_count = 0;
1915                                         if (entry->end == save_end)
1916                                                 break;
1917                                         entry = entry->next;
1918                                         KASSERT(entry != &map->header, ("bad entry clip during backout"));
1919                                 }
1920                                 end = save_start;       /* unwire the rest */
1921                                 break;
1922                         }
1923                         /*
1924                          * note that even though the entry might have been
1925                          * clipped, the USER_WIRED flag we set prevents
1926                          * duplication so we do not have to do a 
1927                          * clip check.
1928                          */
1929                         entry = entry->next;
1930                 }
1931
1932                 /*
1933                  * If we failed fall through to the unwiring section to
1934                  * unwire what we had wired so far.  'end' has already
1935                  * been adjusted.
1936                  */
1937                 if (rv)
1938                         new_pageable = 1;
1939
1940                 /*
1941                  * start_entry might have been clipped if we unlocked the
1942                  * map and blocked.  No matter how clipped it has gotten
1943                  * there should be a fragment that is on our start boundary.
1944                  */
1945                 CLIP_CHECK_BACK(start_entry, start);
1946         }
1947
1948         /*
1949          * Deal with the unwiring case.
1950          */
1951         if (new_pageable) {
1952                 /*
1953                  * This is the unwiring case.  We must first ensure that the
1954                  * range to be unwired is really wired down.  We know there
1955                  * are no holes.
1956                  */
1957                 entry = start_entry;
1958                 while ((entry != &map->header) && (entry->start < end)) {
1959                         if ((entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
1960                                 rv = KERN_INVALID_ARGUMENT;
1961                                 goto done;
1962                         }
1963                         KASSERT(entry->wired_count != 0, ("wired count was 0 with USER_WIRED set! %p", entry));
1964                         entry = entry->next;
1965                 }
1966
1967                 /*
1968                  * Now decrement the wiring count for each region. If a region
1969                  * becomes completely unwired, unwire its physical pages and
1970                  * mappings.
1971                  */
1972                 /*
1973                  * Note that the "entry" loop variable must be reset to
1974                  * start_entry here.  The verification loop above advanced
1975                  * it past the end of the range being unwired, so a loop
1976                  * picking up the stale value without first resetting it
1977                  * to start_entry would never be entered.  In that case
1978                  * the pages backing the entries would never be unwired,
1979                  * leaking wired pages.  The reset below avoids exactly
1980                  * that failure mode.
1981                  */
1982                 entry = start_entry;
1983                 while ((entry != &map->header) && (entry->start < end)) {
1984                         KASSERT(entry->eflags & MAP_ENTRY_USER_WIRED,
1985                                 ("expected USER_WIRED on entry %p", entry));
1986                         entry->eflags &= ~MAP_ENTRY_USER_WIRED;
1987                         entry->wired_count--;
1988                         if (entry->wired_count == 0)
1989                                 vm_fault_unwire(map, entry);
1990                         entry = entry->next;
1991                 }
1992         }
1993 done:
1994         vm_map_unclip_range(map, start_entry, start, real_end, &count,
1995                 MAP_CLIP_NO_HOLES);
1996         map->timestamp++;
1997         vm_map_unlock(map);
1998         vm_map_entry_release(count);
1999         return (rv);
2000 }
2001
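/*
 * Example: mlock()/munlock() style usage (illustrative sketch only;
 * "map", "addr" and "size" are hypothetical).  Passing FALSE for
 * new_pageable user-wires the range, passing TRUE unwires it:
 *
 *      rv = vm_map_unwire(map, addr, addr + size, FALSE);      (wire)
 *      rv = vm_map_unwire(map, addr, addr + size, TRUE);       (unwire)
 */
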
2002 /*
2003  *      vm_map_wire:
2004  *
2005  *      Sets the pageability of the specified address
2006  *      range in the target map.  Regions specified
2007  *      as not pageable require locked-down physical
2008  *      memory and physical page maps.
2009  *
2010  *      The map must not be locked, but a reference
2011  *      must remain to the map throughout the call.
2012  *
2013  *      This function may be called via the zalloc path and must properly
2014  *      reserve map entries for kernel_map.
2015  */
2016 int
2017 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t real_end, int kmflags)
2018 {
2019         vm_map_entry_t entry;
2020         vm_map_entry_t start_entry;
2021         vm_offset_t end;
2022         int rv = KERN_SUCCESS;
2023         int count;
2024
2025         if (kmflags & KM_KRESERVE)
2026                 count = vm_map_entry_kreserve(MAP_RESERVE_COUNT);
2027         else
2028                 count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
2029         vm_map_lock(map);
2030         VM_MAP_RANGE_CHECK(map, start, real_end);
2031         end = real_end;
2032
2033         start_entry = vm_map_clip_range(map, start, end, &count, MAP_CLIP_NO_HOLES);
2034         if (start_entry == NULL) {
2035                 vm_map_unlock(map);
2036                 rv = KERN_INVALID_ADDRESS;
2037                 goto failure;
2038         }
2039         if ((kmflags & KM_PAGEABLE) == 0) {
2040                 /*
2041                  * Wiring.  
2042                  *
2043                  * 1.  Holding the write lock, we create any shadow or zero-fill
2044                  * objects that need to be created. Then we clip each map
2045                  * entry to the region to be wired and increment its wiring
2046                  * count.  We create objects before clipping the map entries
2047                  * to avoid object proliferation.
2048                  *
2049                  * 2.  We downgrade to a read lock, and call vm_fault_wire to
2050                  * fault in the pages for any newly wired area (wired_count is
2051                  * 1).
2052                  *
2053                  * Downgrading to a read lock for vm_fault_wire avoids a 
2054                  * possible deadlock with another process that may have faulted
2055                  * on one of the pages to be wired (it would mark the page busy,
2056                  * blocking us, then in turn block on the map lock that we
2057                  * hold).  Because of problems in the recursive lock package,
2058                  * we cannot upgrade to a write lock in vm_map_lookup.  Thus,
2059                  * any actions that require the write lock must be done
2060                  * beforehand.  Because we keep the read lock on the map, the
2061                  * copy-on-write status of the entries we modify here cannot
2062                  * change.
2063                  */
2064
2065                 entry = start_entry;
2066                 while ((entry != &map->header) && (entry->start < end)) {
2067                         /*
2068                          * Trivial case if the entry is already wired
2069                          */
2070                         if (entry->wired_count) {
2071                                 entry->wired_count++;
2072                                 entry = entry->next;
2073                                 continue;
2074                         }
2075
2076                         /*
2077                          * The entry is being newly wired, we have to setup
2078                          * appropriate management structures.  A shadow 
2079                          * object is required for a copy-on-write region,
2080                          * or a normal object for a zero-fill region.  We
2081                          * do not have to do this for entries that point to sub
2082                          * maps because we won't hold the lock on the sub map.
2083                          */
2084                         if (entry->maptype != VM_MAPTYPE_SUBMAP) {
2085                                 int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
2086                                 if (copyflag &&
2087                                     ((entry->protection & VM_PROT_WRITE) != 0)) {
2088                                         vm_map_entry_shadow(entry);
2089                                 } else if (entry->object.vm_object == NULL &&
2090                                            !map->system_map) {
2091                                         vm_map_entry_allocate_object(entry);
2092                                 }
2093                         }
2094
2095                         entry->wired_count++;
2096                         entry = entry->next;
2097                 }
2098
2099                 /*
2100                  * Pass 2.
2101                  */
2102
2103                 /*
2104                  * HACK HACK HACK HACK
2105                  *
2106                  * Unlock the map to avoid deadlocks.  The in-transit flag
2107                  * protects us from most changes but note that
2108                  * clipping may still occur.  To prevent clipping from
2109                  * occurring after the unlock, except when we are
2110                  * blocking in vm_fault_wire, we must run in a critical
2111                  * section, otherwise our accesses to entry->start and 
2112                  * entry->end could be corrupted.  We have to enter the
2113                  * critical section prior to unlocking so start_entry does
2114                  * not change out from under us at the very beginning of the
2115                  * loop.
2116                  *
2117                  * HACK HACK HACK HACK
2118                  */
2119
2120                 crit_enter();
2121
2122                 entry = start_entry;
2123                 while (entry != &map->header && entry->start < end) {
2124                         /*
2125                          * If vm_fault_wire fails for any page we need to undo
2126                          * what has been done.  We decrement the wiring count
2127                          * for those pages which have not yet been wired (now)
2128                          * and unwire those that have (later).
2129                          */
2130                         vm_offset_t save_start = entry->start;
2131                         vm_offset_t save_end = entry->end;
2132
2133                         if (entry->wired_count == 1)
2134                                 rv = vm_fault_wire(map, entry, FALSE);
2135                         if (rv) {
2136                                 CLIP_CHECK_BACK(entry, save_start);
2137                                 for (;;) {
2138                                         KASSERT(entry->wired_count == 1, ("wired_count changed unexpectedly"));
2139                                         entry->wired_count = 0;
2140                                         if (entry->end == save_end)
2141                                                 break;
2142                                         entry = entry->next;
2143                                         KASSERT(entry != &map->header, ("bad entry clip during backout"));
2144                                 }
2145                                 end = save_start;
2146                                 break;
2147                         }
2148                         CLIP_CHECK_FWD(entry, save_end);
2149                         entry = entry->next;
2150                 }
2151                 crit_exit();
2152
2153                 /*
2154                  * If a failure occurred, undo everything by falling through
2155                  * to the unwiring code.  'end' has already been adjusted
2156                  * appropriately.
2157                  */
2158                 if (rv)
2159                         kmflags |= KM_PAGEABLE;
2160
2161                 /*
2162                  * start_entry is still IN_TRANSITION but may have been 
2163                  * clipped since vm_fault_wire() unlocks and relocks the
2164                  * map.  No matter how clipped it has gotten there should
2165                  * be a fragment that is on our start boundary.
2166                  */
2167                 CLIP_CHECK_BACK(start_entry, start);
2168         }
2169
2170         if (kmflags & KM_PAGEABLE) {
2171                 /*
2172                  * This is the unwiring case.  We must first ensure that the
2173                  * range to be unwired is really wired down.  We know there
2174                  * are no holes.
2175                  */
2176                 entry = start_entry;
2177                 while ((entry != &map->header) && (entry->start < end)) {
2178                         if (entry->wired_count == 0) {
2179                                 rv = KERN_INVALID_ARGUMENT;
2180                                 goto done;
2181                         }
2182                         entry = entry->next;
2183                 }
2184
2185                 /*
2186                  * Now decrement the wiring count for each region. If a region
2187                  * becomes completely unwired, unwire its physical pages and
2188                  * mappings.
2189                  */
2190                 entry = start_entry;
2191                 while ((entry != &map->header) && (entry->start < end)) {
2192                         entry->wired_count--;
2193                         if (entry->wired_count == 0)
2194                                 vm_fault_unwire(map, entry);
2195                         entry = entry->next;
2196                 }
2197         }
2198 done:
2199         vm_map_unclip_range(map, start_entry, start, real_end, &count,
2200                 MAP_CLIP_NO_HOLES);
2201         map->timestamp++;
2202         vm_map_unlock(map);
2203 failure:
2204         if (kmflags & KM_KRESERVE)
2205                 vm_map_entry_krelease(count);
2206         else
2207                 vm_map_entry_release(count);
2208         return (rv);
2209 }
2210
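/*
 * Example (illustrative sketch only; "map", "addr" and "size" are
 * hypothetical).  Callers on the zalloc path pass KM_KRESERVE so the
 * entry reservation comes out of the kernel reserve; KM_PAGEABLE
 * selects the unwiring case:
 *
 *      rv = vm_map_wire(map, addr, addr + size, KM_KRESERVE);
 *      ...
 *      rv = vm_map_wire(map, addr, addr + size,
 *                       KM_PAGEABLE | KM_KRESERVE);
 */
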
2211 /*
2212  * vm_map_set_wired_quick()
2213  *
2214  *      Mark a newly allocated address range as wired but do not fault in
2215  *      the pages.  The caller is expected to load the pages into the object.
2216  *
2217  *      The map must be locked on entry and will remain locked on return.
2218  */
2219 void
2220 vm_map_set_wired_quick(vm_map_t map, vm_offset_t addr, vm_size_t size, int *countp)
2221 {
2222         vm_map_entry_t scan;
2223         vm_map_entry_t entry;
2224
2225         entry = vm_map_clip_range(map, addr, addr + size, countp, MAP_CLIP_NO_HOLES);
2226         for (scan = entry; scan != &map->header && scan->start < addr + size; scan = scan->next) {
2227             KKASSERT(scan->wired_count == 0);
2228             scan->wired_count = 1;
2229         }
2230         vm_map_unclip_range(map, entry, addr, addr + size, countp, MAP_CLIP_NO_HOLES);
2231 }
2232
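/*
 * Example (illustrative sketch only; "map", "addr", "size" and "count"
 * are hypothetical).  The caller already holds the map lock and is
 * expected to load the pages into the backing object itself:
 *
 *      count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
 *      vm_map_lock(map);
 *      ... allocate the range at addr ...
 *      vm_map_set_wired_quick(map, addr, size, &count);
 *      ... install the pages in the object ...
 *      vm_map_unlock(map);
 *      vm_map_entry_release(count);
 */
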
2233 /*
2234  * vm_map_clean
2235  *
2236  * Push any dirty cached pages in the address range to their pager.
2237  * If syncio is TRUE, dirty pages are written synchronously.
2238  * If invalidate is TRUE, any cached pages are freed as well.
2239  *
2240  * Returns an error if any part of the specified range is not mapped.
2241  */
2242 int
2243 vm_map_clean(vm_map_t map, vm_offset_t start, vm_offset_t end, boolean_t syncio,
2244     boolean_t invalidate)
2245 {
2246         vm_map_entry_t current;
2247         vm_map_entry_t entry;
2248         vm_size_t size;
2249         vm_object_t object;
2250         vm_ooffset_t offset;
2251
2252         vm_map_lock_read(map);
2253         VM_MAP_RANGE_CHECK(map, start, end);
2254         if (!vm_map_lookup_entry(map, start, &entry)) {
2255                 vm_map_unlock_read(map);
2256                 return (KERN_INVALID_ADDRESS);
2257         }
2258         /*
2259          * Make a first pass to check for holes.
2260          */
2261         for (current = entry; current->start < end; current = current->next) {
2262                 if (current->maptype == VM_MAPTYPE_SUBMAP) {
2263                         vm_map_unlock_read(map);
2264                         return (KERN_INVALID_ARGUMENT);
2265                 }
2266                 if (end > current->end &&
2267                     (current->next == &map->header ||
2268                         current->end != current->next->start)) {
2269                         vm_map_unlock_read(map);
2270                         return (KERN_INVALID_ADDRESS);
2271                 }
2272         }
2273
2274         if (invalidate)
2275                 pmap_remove(vm_map_pmap(map), start, end);
2276         /*
2277          * Make a second pass, cleaning/uncaching pages from the indicated
2278          * objects as we go.
2279          */
2280         for (current = entry; current->start < end; current = current->next) {
2281                 offset = current->offset + (start - current->start);
2282                 size = (end <= current->end ? end : current->end) - start;
2283                 if (current->maptype == VM_MAPTYPE_SUBMAP) {
2284                         vm_map_t smap;
2285                         vm_map_entry_t tentry;
2286                         vm_size_t tsize;
2287
2288                         smap = current->object.sub_map;
2289                         vm_map_lock_read(smap);
2290                         vm_map_lookup_entry(smap, offset, &tentry);
2291                         tsize = tentry->end - offset;
2292                         if (tsize < size)
2293                                 size = tsize;
2294                         object = tentry->object.vm_object;
2295                         offset = tentry->offset + (offset - tentry->start);
2296                         vm_map_unlock_read(smap);
2297                 } else {
2298                         object = current->object.vm_object;
2299                 }
2300                 /*
2301                  * Note that there is absolutely no sense in writing out
2302                  * anonymous objects, so we track down the vnode object
2303                  * to write out.
2304                  * We invalidate (remove) all pages from the address space
2305                  * anyway, for semantic correctness.
2306                  *
2307                  * note: certain anonymous maps, such as MAP_NOSYNC maps,
2308                  * may start out with a NULL object.
2309                  */
2310                 while (object && object->backing_object) {
2311                         offset += object->backing_object_offset;
2312                         object = object->backing_object;
2313                         if (object->size < OFF_TO_IDX(offset + size))
2314                                 size = IDX_TO_OFF(object->size) - offset;
2315                 }
2316                 if (object && (object->type == OBJT_VNODE) && 
2317                     (current->protection & VM_PROT_WRITE)) {
2318                         /*
2319                          * Flush pages if writing is allowed, invalidate them
2320                          * if invalidation requested.  Pages undergoing I/O
2321                          * will be ignored by vm_object_page_remove().
2322                          *
2323                          * We cannot lock the vnode and then wait for paging
2324                          * to complete without deadlocking against vm_fault.
2325                          * Instead we simply call vm_object_page_remove() and
2326                          * allow it to block internally on a page-by-page 
2327                          * basis when it encounters pages undergoing async 
2328                          * I/O.
2329                          */
2330                         int flags;
2331
2332                         vm_object_reference(object);
2333                         vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY);
2334                         flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
2335                         flags |= invalidate ? OBJPC_INVAL : 0;
2336
2337                         /*
2338                          * When operating on a virtual page table just
2339                          * flush the whole object.  XXX we probably ought
2340                          * to 
2341                          */
2342                         switch(current->maptype) {
2343                         case VM_MAPTYPE_NORMAL:
2344                                 vm_object_page_clean(object,
2345                                     OFF_TO_IDX(offset),
2346                                     OFF_TO_IDX(offset + size + PAGE_MASK),
2347                                     flags);
2348                                 break;
2349                         case VM_MAPTYPE_VPAGETABLE:
2350                                 vm_object_page_clean(object, 0, 0, flags);
2351                                 break;
2352                         }
2353                         vn_unlock(((struct vnode *)object->handle));
2354                         vm_object_deallocate(object);
2355                 }
2356                 if (object && invalidate &&
2357                    ((object->type == OBJT_VNODE) ||
2358                     (object->type == OBJT_DEVICE))) {
2359                         int clean_only = 
2360                                 (object->type == OBJT_DEVICE) ? FALSE : TRUE;
2361                         vm_object_reference(object);
2362                         switch(current->maptype) {
2363                         case VM_MAPTYPE_NORMAL:
2364                                 vm_object_page_remove(object,
2365                                     OFF_TO_IDX(offset),
2366                                     OFF_TO_IDX(offset + size + PAGE_MASK),
2367                                     clean_only);
2368                                 break;
2369                         case VM_MAPTYPE_VPAGETABLE:
2370                                 vm_object_page_remove(object, 0, 0, clean_only);
2371                                 break;
2372                         }
2373                         vm_object_deallocate(object);
2374                 }
2375                 start += size;
2376         }
2377
2378         vm_map_unlock_read(map);
2379         return (KERN_SUCCESS);
2380 }
2381
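/*
 * Example: msync() style usage (illustrative sketch only; "map", "start"
 * and "end" are hypothetical).  The first call flushes dirty pages
 * synchronously; the second also frees any cached pages:
 *
 *      rv = vm_map_clean(map, start, end, TRUE, FALSE);
 *      rv = vm_map_clean(map, start, end, FALSE, TRUE);
 */
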
2382 /*
2383  *      vm_map_entry_unwire:    [ internal use only ]
2384  *
2385  *      Make the region specified by this entry pageable.
2386  *
2387  *      The map in question should be locked.
2388  *      [This is the reason for this routine's existence.]
2389  */
2390 static void 
2391 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
2392 {
2393         entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2394         entry->wired_count = 0;
2395         vm_fault_unwire(map, entry);
2396 }
2397
2398 /*
2399  *      vm_map_entry_delete:    [ internal use only ]
2400  *
2401  *      Deallocate the given entry from the target map.
2402  */
2403 static void
2404 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry, int *countp)
2405 {
2406         vm_map_entry_unlink(map, entry);
2407         map->size -= entry->end - entry->start;
2408
2409         switch(entry->maptype) {
2410         case VM_MAPTYPE_NORMAL:
2411         case VM_MAPTYPE_VPAGETABLE:
2412                 vm_object_deallocate(entry->object.vm_object);
2413                 break;
2414         default:
2415                 break;
2416         }
2417
2418         vm_map_entry_dispose(map, entry, countp);
2419 }
2420
2421 /*
2422  *      vm_map_delete:  [ internal use only ]
2423  *
2424  *      Deallocates the given address range from the target
2425  *      map.
2426  */
2427 int
2428 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end, int *countp)
2429 {
2430         vm_object_t object;
2431         vm_map_entry_t entry;
2432         vm_map_entry_t first_entry;
2433
2434 again:
2435         /*
2436          * Find the start of the region, and clip it.  Set entry to point
2437          * at the first record containing the requested address or, if no
2438          * such record exists, the next record with a greater address.  The
2439          * loop will run from this point until a record beyond the termination
2440          * address is encountered.
2441          *
2442          * map->hint must be adjusted to not point to anything we delete,
2443          * so set it to the entry prior to the one being deleted.
2444          *
2445          * GGG see other GGG comment.
2446          */
2447         if (vm_map_lookup_entry(map, start, &first_entry)) {
2448                 entry = first_entry;
2449                 vm_map_clip_start(map, entry, start, countp);
2450                 map->hint = entry->prev;        /* possible problem XXX */
2451         } else {
2452                 map->hint = first_entry;        /* possible problem XXX */
2453                 entry = first_entry->next;
2454         }
2455
2456         /*
2457          * If a hole opens up prior to the current first_free then
2458          * adjust first_free.  As with map->hint, map->first_free
2459          * cannot be left set to anything we might delete.
2460          */
2461         if (entry == &map->header) {
2462                 map->first_free = &map->header;
2463         } else if (map->first_free->start >= start) {
2464                 map->first_free = entry->prev;
2465         }
2466
2467         /*
2468          * Step through all entries in this region
2469          */
2470
2471         while ((entry != &map->header) && (entry->start < end)) {
2472                 vm_map_entry_t next;
2473                 vm_offset_t s, e;
2474                 vm_pindex_t offidxstart, offidxend, count;
2475
2476                 /*
2477                  * If we hit an in-transition entry we have to sleep and
2478                  * retry.  It's easier (and not really slower) to just retry
2479                  * since this case occurs so rarely and the hint is already
2480                  * pointing at the right place.  We have to reset the
2481                  * start offset so as not to accidently delete an entry
2482                  * start offset so as not to accidentally delete an entry
2483                  */
2484                 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
2485                         entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2486                         start = entry->start;
2487                         ++mycpu->gd_cnt.v_intrans_coll;
2488                         ++mycpu->gd_cnt.v_intrans_wait;
2489                         vm_map_transition_wait(map);
2490                         goto again;
2491                 }
2492                 vm_map_clip_end(map, entry, end, countp);
2493
2494                 s = entry->start;
2495                 e = entry->end;
2496                 next = entry->next;
2497
2498                 offidxstart = OFF_TO_IDX(entry->offset);
2499                 count = OFF_TO_IDX(e - s);
2500                 object = entry->object.vm_object;
2501
2502                 /*
2503                  * Unwire before removing addresses from the pmap; otherwise,
2504                  * unwiring will put the entries back in the pmap.
2505                  */
2506                 if (entry->wired_count != 0)
2507                         vm_map_entry_unwire(map, entry);
2508
2509                 offidxend = offidxstart + count;
2510
2511                 if (object == &kernel_object) {
2512                         vm_object_page_remove(object, offidxstart, offidxend, FALSE);
2513                 } else {
2514                         pmap_remove(map->pmap, s, e);
2515                         if (object != NULL &&
2516                             object->ref_count != 1 &&
2517                             (object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING &&
2518                             (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
2519                                 vm_object_collapse(object);
2520                                 vm_object_page_remove(object, offidxstart, offidxend, FALSE);
2521                                 if (object->type == OBJT_SWAP) {
2522                                         swap_pager_freespace(object, offidxstart, count);
2523                                 }
2524                                 if (offidxend >= object->size &&
2525                                     offidxstart < object->size) {
2526                                         object->size = offidxstart;
2527                                 }
2528                         }
2529                 }
2530
2531                 /*
2532                  * Delete the entry (which may delete the object) only after
2533                  * removing all pmap entries pointing to its pages.
2534                  * (Otherwise, its page frames may be reallocated, and any
2535                  * modify bits will be set in the wrong object!)
2536                  */
2537                 vm_map_entry_delete(map, entry, countp);
2538                 entry = next;
2539         }
2540         return (KERN_SUCCESS);
2541 }
2542
2543 /*
2544  *      vm_map_remove:
2545  *
2546  *      Remove the given address range from the target map.
2547  *      This is the exported form of vm_map_delete.
2548  */
2549 int
2550 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
2551 {
2552         int result;
2553         int count;
2554
2555         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
2556         vm_map_lock(map);
2557         VM_MAP_RANGE_CHECK(map, start, end);
2558         result = vm_map_delete(map, start, end, &count);
2559         vm_map_unlock(map);
2560         vm_map_entry_release(count);
2561
2562         return (result);
2563 }
2564
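/*
 * Example (illustrative sketch only; "map", "addr" and "size" are
 * hypothetical).  Unlike vm_map_delete(), the caller needs no map lock
 * or entry reservation:
 *
 *      (void) vm_map_remove(map, addr, addr + size);
 */
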
2565 /*
2566  *      vm_map_check_protection:
2567  *
2568  *      Assert that the target map allows the specified
2569  *      privilege on the entire address region given.
2570  *      The entire region must be allocated.
2571  */
2572 boolean_t
2573 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
2574                         vm_prot_t protection)
2575 {
2576         vm_map_entry_t entry;
2577         vm_map_entry_t tmp_entry;
2578
2579         if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
2580                 return (FALSE);
2581         }
2582         entry = tmp_entry;
2583
2584         while (start < end) {
2585                 if (entry == &map->header) {
2586                         return (FALSE);
2587                 }
2588                 /*
2589                  * No holes allowed!
2590                  */
2591
2592                 if (start < entry->start) {
2593                         return (FALSE);
2594                 }
2595                 /*
2596                  * Check protection associated with entry.
2597                  */
2598
2599                 if ((entry->protection & protection) != protection) {
2600                         return (FALSE);
2601                 }
2602                 /* go to next entry */
2603
2604                 start = entry->end;
2605                 entry = entry->next;
2606         }
2607         return (TRUE);
2608 }
2609
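/*
 * Example (illustrative sketch only; "map", "addr" and "len" are
 * hypothetical):
 *
 *      if (!vm_map_check_protection(map, addr, addr + len, VM_PROT_READ))
 *              return (EFAULT);
 */
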
2610 /*
2611  * Split the pages in a map entry into a new object.  This affords
2612  * easier removal of unused pages, and keeps object inheritance from
2613  * having a negative impact on memory usage.
2614  */
2615 static void
2616 vm_map_split(vm_map_entry_t entry)
2617 {
2618         vm_page_t m;
2619         vm_object_t orig_object, new_object, source;
2620         vm_offset_t s, e;
2621         vm_pindex_t offidxstart, offidxend, idx;
2622         vm_size_t size;
2623         vm_ooffset_t offset;
2624
2625         orig_object = entry->object.vm_object;
2626         if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP)
2627                 return;
2628         if (orig_object->ref_count <= 1)
2629                 return;
2630
2631         offset = entry->offset;
2632         s = entry->start;
2633         e = entry->end;
2634
2635         offidxstart = OFF_TO_IDX(offset);
2636         offidxend = offidxstart + OFF_TO_IDX(e - s);
2637         size = offidxend - offidxstart;
2638
2639         new_object = vm_pager_allocate(orig_object->type, NULL,
2640                                        IDX_TO_OFF(size), VM_PROT_ALL, 0);
2641         if (new_object == NULL)
2642                 return;
2643
2644         source = orig_object->backing_object;
2645         if (source != NULL) {
2646                 vm_object_reference(source);    /* Referenced by new_object */
2647                 LIST_INSERT_HEAD(&source->shadow_head,
2648                                   new_object, shadow_list);
2649                 vm_object_clear_flag(source, OBJ_ONEMAPPING);
2650                 new_object->backing_object_offset = 
2651                         orig_object->backing_object_offset + IDX_TO_OFF(offidxstart);
2652                 new_object->backing_object = source;
2653                 source->shadow_count++;
2654                 source->generation++;
2655         }
2656
2657         for (idx = 0; idx < size; idx++) {
2658                 vm_page_t m;
2659
2660                 /*
2661                  * A critical section is required to avoid a race between
2662                  * the lookup and an interrupt/unbusy/free and our busy
2663                  * check.
2664                  */
2665                 crit_enter();
2666         retry:
2667                 m = vm_page_lookup(orig_object, offidxstart + idx);
2668                 if (m == NULL) {
2669                         crit_exit();
2670                         continue;
2671                 }
2672
2673                 /*
2674                  * We must wait for pending I/O to complete before we can
2675                  * rename the page.
2676                  *
2677                  * We do not have to VM_PROT_NONE the page as mappings should
2678                  * not be changed by this operation.
2679                  */
2680                 if (vm_page_sleep_busy(m, TRUE, "spltwt"))
2681                         goto retry;
2682                 vm_page_busy(m);
2683                 vm_page_rename(m, new_object, idx);
2684                 /* page automatically made dirty by rename and cache handled */
2685                 /* page stays busy; it is woken up below after the copy */
2686                 crit_exit();
2687         }
2688
2689         if (orig_object->type == OBJT_SWAP) {
2690                 vm_object_pip_add(orig_object, 1);
2691                 /*
2692                  * copy orig_object pages into new_object
2693                  * and destroy unneeded pages in
2694                  * shadow object.
2695                  */
2696                 swap_pager_copy(orig_object, new_object, offidxstart, 0);
2697                 vm_object_pip_wakeup(orig_object);
2698         }
2699
2700         /*
2701          * Wakeup the pages we played with.  No spl protection is needed
2702          * for a simple wakeup.
2703          */
2704         for (idx = 0; idx < size; idx++) {
2705                 m = vm_page_lookup(new_object, idx);
2706                 if (m)
2707                         vm_page_wakeup(m);
2708         }
2709
2710         entry->object.vm_object = new_object;
2711         entry->offset = 0LL;
2712         vm_object_deallocate(orig_object);
2713 }
2714
2715 /*
2716  *      vm_map_copy_entry:
2717  *
2718  *      Copies the contents of the source entry to the destination
2719  *      entry.  The entries *must* be aligned properly.
2720  */
2721 static void
2722 vm_map_copy_entry(vm_map_t src_map, vm_map_t dst_map,
2723         vm_map_entry_t src_entry, vm_map_entry_t dst_entry)
2724 {
2725         vm_object_t src_object;
2726
2727         if (dst_entry->maptype == VM_MAPTYPE_SUBMAP)
2728                 return;
2729         if (src_entry->maptype == VM_MAPTYPE_SUBMAP)
2730                 return;
2731
2732         if (src_entry->wired_count == 0) {
2733                 /*
2734                  * If the source entry is marked needs_copy, it is already
2735                  * write-protected.
2736                  */
2737                 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
2738                         pmap_protect(src_map->pmap,
2739                             src_entry->start,
2740                             src_entry->end,
2741                             src_entry->protection & ~VM_PROT_WRITE);
2742                 }
2743
2744                 /*
2745                  * Make a copy of the object.
2746                  */
2747                 if ((src_object = src_entry->object.vm_object) != NULL) {
2748                         if ((src_object->handle == NULL) &&
2749                                 (src_object->type == OBJT_DEFAULT ||
2750                                  src_object->type == OBJT_SWAP)) {
2751                                 vm_object_collapse(src_object);
2752                                 if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) {
2753                                         vm_map_split(src_entry);
2754                                         src_object = src_entry->object.vm_object;
2755                                 }
2756                         }
2757
2758                         vm_object_reference(src_object);
2759                         vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
2760                         dst_entry->object.vm_object = src_object;
2761                         src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
2762                         dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
2763                         dst_entry->offset = src_entry->offset;
2764                 } else {
2765                         dst_entry->object.vm_object = NULL;
2766                         dst_entry->offset = 0;
2767                 }
2768
2769                 pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
2770                     dst_entry->end - dst_entry->start, src_entry->start);
2771         } else {
2772                 /*
2773                  * Wired-down pages cannot be made copy-on-write, so cause
2774                  * them to be copied into the new map by simulating faults
2775                  * (the new pages are pageable).
2776                  */
2777                 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
2778         }
2779 }
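
/*
 * Resulting state for the unwired case (editor's sketch): both entries
 * reference the same src_object, both carry MAP_ENTRY_COW and
 * MAP_ENTRY_NEEDS_COPY, and the source range has been write-protected
 * in the pmap, so the first write fault on either side builds a private
 * shadow object via vm_map_entry_shadow() (see vm_map_lookup() below).
 */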
2780
2781 /*
2782  * vmspace_fork:
2783  * Create a new process vmspace structure and vm_map
2784  * based on those of an existing process.  The new map
2785  * is based on the old map, according to the inheritance
2786  * values on the regions in that map.
2787  *
2788  * The source map must not be locked.
2789  */
2790 struct vmspace *
2791 vmspace_fork(struct vmspace *vm1)
2792 {
2793         struct vmspace *vm2;
2794         vm_map_t old_map = &vm1->vm_map;
2795         vm_map_t new_map;
2796         vm_map_entry_t old_entry;
2797         vm_map_entry_t new_entry;
2798         vm_object_t object;
2799         int count;
2800
2801         vm_map_lock(old_map);
2802         old_map->infork = 1;
2803
2804         /*
2805          * XXX Note: upcalls are not copied.
2806          */
2807         vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset);
2808         bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
2809             (caddr_t)&vm1->vm_endcopy - (caddr_t)&vm1->vm_startcopy);
2810         new_map = &vm2->vm_map; /* XXX */
2811         new_map->timestamp = 1;
2812
2813         count = 0;
2814         old_entry = old_map->header.next;
2815         while (old_entry != &old_map->header) {
2816                 ++count;
2817                 old_entry = old_entry->next;
2818         }
2819
2820         count = vm_map_entry_reserve(count + MAP_RESERVE_COUNT);
2821
2822         old_entry = old_map->header.next;
2823         while (old_entry != &old_map->header) {
2824                 if (old_entry->maptype == VM_MAPTYPE_SUBMAP)
2825                         panic("vm_map_fork: encountered a submap");
2826
2827                 switch (old_entry->inheritance) {
2828                 case VM_INHERIT_NONE:
2829                         break;
2830
2831                 case VM_INHERIT_SHARE:
2832                         /*
2833                          * Clone the entry, creating the shared object if
2834                          * necessary.
2835                          */
2836                         object = old_entry->object.vm_object;
2837                         if (object == NULL) {
2838                                 vm_map_entry_allocate_object(old_entry);
2839                                 object = old_entry->object.vm_object;
2840                         }
2841
2842                         /*
2843                          * Add the reference before calling vm_map_entry_shadow
2844                          * to insure that a shadow object is created.
2845                          * to ensure that a shadow object is created.
2846                         vm_object_reference(object);
2847                         if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
2848                                 vm_map_entry_shadow(old_entry);
2849                                 /* Transfer the second reference too. */
2850                                 vm_object_reference(
2851                                     old_entry->object.vm_object);
2852                                 vm_object_deallocate(object);
2853                                 object = old_entry->object.vm_object;
2854                         }
2855                         vm_object_clear_flag(object, OBJ_ONEMAPPING);
2856
2857                         /*
2858                          * Clone the entry, referencing the shared object.
2859                          */
2860                         new_entry = vm_map_entry_create(new_map, &count);
2861                         *new_entry = *old_entry;
2862                         new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2863                         new_entry->wired_count = 0;
2864
2865                         /*
2866                          * Insert the entry into the new map -- we know we're
2867                          * inserting at the end of the new map.
2868                          */
2869
2870                         vm_map_entry_link(new_map, new_map->header.prev,
2871                             new_entry);
2872
2873                         /*
2874                          * Update the physical map
2875                          */
2876
2877                         pmap_copy(new_map->pmap, old_map->pmap,
2878                             new_entry->start,
2879                             (old_entry->end - old_entry->start),
2880                             old_entry->start);
2881                         break;
2882
2883                 case VM_INHERIT_COPY:
2884                         /*
2885                          * Clone the entry and link into the map.
2886                          */
2887                         new_entry = vm_map_entry_create(new_map, &count);
2888                         *new_entry = *old_entry;
2889                         new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2890                         new_entry->wired_count = 0;
2891                         new_entry->object.vm_object = NULL;
2892                         vm_map_entry_link(new_map, new_map->header.prev,
2893                             new_entry);
2894                         vm_map_copy_entry(old_map, new_map, old_entry,
2895                             new_entry);
2896                         break;
2897                 }
2898                 old_entry = old_entry->next;
2899         }
2900
2901         new_map->size = old_map->size;
2902         old_map->infork = 0;
2903         vm_map_unlock(old_map);
2904         vm_map_entry_release(count);
2905
2906         return (vm2);
2907 }
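
/*
 * Usage sketch (hypothetical caller; the real fork path lives in the
 * kern_fork code, not in this file):
 *
 *	struct vmspace *vm2;
 *
 *	vm2 = vmspace_fork(p1->p_vmspace);	(p1's map must be unlocked)
 *	p2->p_vmspace = vm2;
 *	pmap_pinit2(vmspace_pmap(vm2));
 *
 * Entries inherited VM_INHERIT_NONE simply do not appear in vm2's map.
 */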
2908
2909 int
2910 vm_map_stack (vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
2911               vm_prot_t prot, vm_prot_t max, int cow)
2912 {
2913         vm_map_entry_t prev_entry;
2914         vm_map_entry_t new_stack_entry;
2915         vm_size_t      init_ssize;
2916         int            rv;
2917         int             count;
2918
2919         if (VM_MIN_USER_ADDRESS > 0 && addrbos < VM_MIN_USER_ADDRESS)
2920                 return (KERN_NO_SPACE);
2921
2922         if (max_ssize < sgrowsiz)
2923                 init_ssize = max_ssize;
2924         else
2925                 init_ssize = sgrowsiz;
2926
2927         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
2928         vm_map_lock(map);
2929
2930         /* If addr is already mapped, no go */
2931         if (vm_map_lookup_entry(map, addrbos, &prev_entry)) {
2932                 vm_map_unlock(map);
2933                 vm_map_entry_release(count);
2934                 return (KERN_NO_SPACE);
2935         }
2936
2937         /* If we would blow our VMEM resource limit, no go */
2938         if (map->size + init_ssize >
2939             curproc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
2940                 vm_map_unlock(map);
2941                 vm_map_entry_release(count);
2942                 return (KERN_NO_SPACE);
2943         }
2944
2945         /* If we can't accommodate max_ssize in the current mapping,
2946          * no go.  However, we need to be aware that subsequent user
2947          * mappings might map into the space we have reserved for
2948          * stack, and currently this space is not protected.  
2949          * 
2950          * Hopefully we will at least detect this condition 
2951          * when we try to grow the stack.
2952          */
2953         if ((prev_entry->next != &map->header) &&
2954             (prev_entry->next->start < addrbos + max_ssize)) {
2955                 vm_map_unlock(map);
2956                 vm_map_entry_release(count);
2957                 return (KERN_NO_SPACE);
2958         }
2959
2960         /* We initially map a stack of only init_ssize.  We will
2961          * grow as needed later.  Since this is to be a grow 
2962          * down stack, we map at the top of the range.
2963          *
2964          * Note: we would normally expect prot and max to be
2965          * VM_PROT_ALL, and cow to be 0.  Possibly we should
2966          * eliminate these as input parameters, and just
2967          * pass these values here in the insert call.
2968          */
2969         rv = vm_map_insert(map, &count,
2970                            NULL, 0, addrbos + max_ssize - init_ssize,
2971                            addrbos + max_ssize,
2972                            VM_MAPTYPE_NORMAL,
2973                            prot, max,
2974                            cow);
2975
2976         /* Now set the avail_ssize amount */
2977         if (rv == KERN_SUCCESS) {
2978                 if (prev_entry != &map->header)
2979                         vm_map_clip_end(map, prev_entry, addrbos + max_ssize - init_ssize, &count);
2980                 new_stack_entry = prev_entry->next;
2981                 if (new_stack_entry->end   != addrbos + max_ssize ||
2982                     new_stack_entry->start != addrbos + max_ssize - init_ssize)
2983                         panic ("Bad entry start/end for new stack entry");
2984                 else 
2985                         new_stack_entry->aux.avail_ssize = max_ssize - init_ssize;
2986         }
2987
2988         vm_map_unlock(map);
2989         vm_map_entry_release(count);
2990         return (rv);
2991 }
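
/*
 * Usage sketch (hypothetical exec-path caller; stack_top and maxssiz
 * are illustrative names):  to reserve a grow-down user stack of at
 * most maxssiz bytes ending at stack_top:
 *
 *	rv = vm_map_stack(&vm->vm_map, stack_top - maxssiz, maxssiz,
 *			  VM_PROT_ALL, VM_PROT_ALL, 0);
 *
 * Only min(maxssiz, sgrowsiz) bytes are mapped up front, at the top of
 * the range; the remainder is recorded in aux.avail_ssize and mapped
 * in later by vm_map_growstack().
 */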
2992
2993 /* Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if the
2994  * desired address is already mapped, or if we successfully grow
2995  * the stack.  Also returns KERN_SUCCESS if addr is outside the
2996  * stack range (this is strange, but preserves compatibility with
2997  * the grow function in vm_machdep.c).
2998  */
2999 int
3000 vm_map_growstack (struct proc *p, vm_offset_t addr)
3001 {
3002         vm_map_entry_t prev_entry;
3003         vm_map_entry_t stack_entry;
3004         vm_map_entry_t new_stack_entry;
3005         struct vmspace *vm = p->p_vmspace;
3006         vm_map_t map = &vm->vm_map;
3007         vm_offset_t    end;
3008         int grow_amount;
3009         int rv = KERN_SUCCESS;
3010         int is_procstack;
3011         int use_read_lock = 1;
3012         int count;
3013
3014         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
3015 Retry:
3016         if (use_read_lock)
3017                 vm_map_lock_read(map);
3018         else
3019                 vm_map_lock(map);
3020
3021         /* If addr is already in the entry range, no need to grow. */
3022         if (vm_map_lookup_entry(map, addr, &prev_entry))
3023                 goto done;
3024
3025         if ((stack_entry = prev_entry->next) == &map->header)
3026                 goto done;
3027         if (prev_entry == &map->header) 
3028                 end = stack_entry->start - stack_entry->aux.avail_ssize;
3029         else
3030                 end = prev_entry->end;
3031
3032         /* This next test mimics the old grow function in vm_machdep.c.
3033          * It really doesn't quite make sense, but we do it anyway
3034          * for compatibility.
3035          *
3036          * If the stack is not growable, return success.  This signals
3037          * the caller to proceed as it normally would with ordinary vm.
3038          */
3039         if (stack_entry->aux.avail_ssize < 1 ||
3040             addr >= stack_entry->start ||
3041             addr <  stack_entry->start - stack_entry->aux.avail_ssize) {
3042                 goto done;
3043         } 
3044         
3045         /* Find the minimum grow amount */
3046         grow_amount = roundup (stack_entry->start - addr, PAGE_SIZE);
3047         if (grow_amount > stack_entry->aux.avail_ssize) {
3048                 rv = KERN_NO_SPACE;
3049                 goto done;
3050         }
3051
3052         /* If there is no longer enough space between the entries,
3053          * fail and adjust the available space.  Note: this
3054          * should only happen if the user has mapped into the
3055          * stack area after the stack was created, and is
3056          * probably an error.
3057          *
3058          * This also effectively destroys any guard page the user
3059          * might have intended by limiting the stack size.
3060          */
3061         if (grow_amount > stack_entry->start - end) {
3062                 if (use_read_lock && vm_map_lock_upgrade(map)) {
3063                         use_read_lock = 0;
3064                         goto Retry;
3065                 }
3066                 use_read_lock = 0;
3067                 stack_entry->aux.avail_ssize = stack_entry->start - end;
3068                 rv = KERN_NO_SPACE;
3069                 goto done;
3070         }
3071
3072         is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr;
3073
3074         /* If this is the main process stack, see if we're over the 
3075          * stack limit.
3076          */
3077         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
3078                              p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
3079                 rv = KERN_NO_SPACE;
3080                 goto done;
3081         }
3082
3083         /* Round the grow amount up to a multiple of sgrowsiz */
3084         grow_amount = roundup (grow_amount, sgrowsiz);
3085         if (grow_amount > stack_entry->aux.avail_ssize) {
3086                 grow_amount = stack_entry->aux.avail_ssize;
3087         }
3088         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
3089                              p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
3090                 grow_amount = p->p_rlimit[RLIMIT_STACK].rlim_cur -
3091                               ctob(vm->vm_ssize);
3092         }
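
        /*
         * Worked example (editor's sketch, assuming PAGE_SIZE is 4096
         * and sgrowsiz is 32K):  a fault 0x1234 bytes below
         * stack_entry->start yields a minimum grow_amount of
         * roundup(0x1234, 4096) = 0x2000, which the rounding above
         * turns into a single 0x8000 chunk, clamped to avail_ssize and
         * the RLIMIT_STACK ceiling.
         */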
3093
3094         /* If we would blow our VMEM resource limit, no go */
3095         if (map->size + grow_amount > p->p_rlimit[RLIMIT_VMEM].rlim_cur) {
3096                 rv = KERN_NO_SPACE;
3097                 goto done;
3098         }
3099
3100         if (use_read_lock && vm_map_lock_upgrade(map)) {
3101                 use_read_lock = 0;
3102                 goto Retry;
3103         }
3104         use_read_lock = 0;
3105
3106         /* Get the preliminary new entry start value */
3107         addr = stack_entry->start - grow_amount;
3108
3109         /* If this puts us into the previous entry, cut back our growth
3110          * to the available space.  Also, see the note above.
3111          */
3112         if (addr < end) {
3113                 stack_entry->aux.avail_ssize = stack_entry->start - end;
3114                 addr = end;
3115         }
3116
3117         rv = vm_map_insert(map, &count,
3118                            NULL, 0, addr, stack_entry->start,
3119                            VM_MAPTYPE_NORMAL,
3120                            VM_PROT_ALL, VM_PROT_ALL,
3121                            0);
3122
3123         /* Adjust the available stack space by the amount we grew. */
3124         if (rv == KERN_SUCCESS) {
3125                 if (prev_entry != &map->header)
3126                         vm_map_clip_end(map, prev_entry, addr, &count);
3127                 new_stack_entry = prev_entry->next;
3128                 if (new_stack_entry->end   != stack_entry->start  ||
3129                     new_stack_entry->start != addr)
3130                         panic ("Bad stack grow start/end in new stack entry");
3131                 else {
3132                         new_stack_entry->aux.avail_ssize =
3133                                 stack_entry->aux.avail_ssize -
3134                                 (new_stack_entry->end - new_stack_entry->start);
3135                         if (is_procstack)
3136                                 vm->vm_ssize += btoc(new_stack_entry->end -
3137                                                      new_stack_entry->start);
3138                 }
3139         }
3140
3141 done:
3142         if (use_read_lock)
3143                 vm_map_unlock_read(map);
3144         else
3145                 vm_map_unlock(map);
3146         vm_map_entry_release(count);
3147         return (rv);
3148 }
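
/*
 * Usage sketch (hypothetical; the real integration is in the trap and
 * vm_fault paths):  on a fault below the mapped portion of the stack a
 * caller would typically attempt to grow before failing hard:
 *
 *	if (vm_map_growstack(p, va) != KERN_SUCCESS)
 *		return (KERN_FAILURE);
 *	(then retry the lookup/fault now that the range is mapped)
 */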
3149
3150 /*
3151  * Unshare the specified VM space for exec.  If other processes are
3152  * mapped to it, then create a new one.  The new vmspace starts empty.
3153  */
3154
3155 void
3156 vmspace_exec(struct proc *p, struct vmspace *vmcopy) 
3157 {
3158         struct vmspace *oldvmspace = p->p_vmspace;
3159         struct vmspace *newvmspace;
3160         vm_map_t map = &p->p_vmspace->vm_map;
3161
3162         /*
3163          * If we are execing a resident vmspace we fork it, otherwise
3164          * we create a new vmspace.  Note that exitingcnt and upcalls
3165          * are not copied to the new vmspace.
3166          */
3167         if (vmcopy)  {
3168             newvmspace = vmspace_fork(vmcopy);
3169         } else {
3170             newvmspace = vmspace_alloc(map->min_offset, map->max_offset);
3171             bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy,
3172                 (caddr_t)&oldvmspace->vm_endcopy - 
3173                     (caddr_t)&oldvmspace->vm_startcopy);
3174         }
3175
3176         /*
3177          * This code is written like this for prototype purposes.  The
3178          * goal is to avoid running down the vmspace here, but let the
3179          * other processes that are still using the vmspace finally
3180          * run it down.  Even though there is little or no chance of blocking
3181          * here, it is a good idea to keep this form for future mods.
3182          */
3183         p->p_vmspace = newvmspace;
3184         pmap_pinit2(vmspace_pmap(newvmspace));
3185         if (p == curproc)
3186                 pmap_activate(p);
3187         vmspace_free(oldvmspace);
3188 }
3189
3190 /*
3191  * Unshare the specified VM space for forcing COW.  This
3192  * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
3193  *
3194  * The exitingcnt test is not strictly necessary but has been
3195  * included for code sanity (to make the code a bit more deterministic).
3196  */
3197
3198 void
3199 vmspace_unshare(struct proc *p) 
3200 {
3201         struct vmspace *oldvmspace = p->p_vmspace;
3202         struct vmspace *newvmspace;
3203
3204         if (oldvmspace->vm_refcnt == 1 && oldvmspace->vm_exitingcnt == 0)
3205                 return;
3206         newvmspace = vmspace_fork(oldvmspace);
3207         p->p_vmspace = newvmspace;
3208         pmap_pinit2(vmspace_pmap(newvmspace));
3209         if (p == curproc)
3210                 pmap_activate(p);
3211         vmspace_free(oldvmspace);
3212 }
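
/*
 * Usage sketch (hypothetical rfork path):  for the (RFMEM|RFPROC) == 0
 * case the caller simply does
 *
 *	vmspace_unshare(p);
 *
 * after which p's map entries are private copy-on-write copies and p's
 * writes no longer affect processes still sharing the old vmspace.
 */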
3213
3214 /*
3215  *      vm_map_lookup:
3216  *
3217  *      Finds the VM object, offset, and
3218  *      protection for a given virtual address in the
3219  *      specified map, assuming a page fault of the
3220  *      type specified.
3221  *
3222  *      Leaves the map in question locked for read; return
3223  *      values are guaranteed until a vm_map_lookup_done
3224  *      call is performed.  Note that the map argument
3225  *      is in/out; the returned map must be used in
3226  *      the call to vm_map_lookup_done.
3227  *
3228  *      A handle (out_entry) is returned for use in
3229  *      vm_map_lookup_done, to make that fast.
3230  *
3231  *      If a lookup is requested with "write protection"
3232  *      specified, the map may be changed to perform virtual
3233  *      copying operations, although the data referenced will
3234  *      remain the same.
3235  */
3236 int
3237 vm_map_lookup(vm_map_t *var_map,                /* IN/OUT */
3238               vm_offset_t vaddr,
3239               vm_prot_t fault_typea,
3240               vm_map_entry_t *out_entry,        /* OUT */
3241               vm_object_t *object,              /* OUT */
3242               vm_pindex_t *pindex,              /* OUT */
3243               vm_prot_t *out_prot,              /* OUT */
3244               boolean_t *wired)                 /* OUT */
3245 {
3246         vm_map_entry_t entry;
3247         vm_map_t map = *var_map;
3248         vm_prot_t prot;
3249         vm_prot_t fault_type = fault_typea;
3250         int use_read_lock = 1;
3251         int rv = KERN_SUCCESS;
3252
3253 RetryLookup:
3254         if (use_read_lock)
3255                 vm_map_lock_read(map);
3256         else
3257                 vm_map_lock(map);
3258
3259         /*
3260          * If the map has an interesting hint, try it before calling the
3261          * full-blown lookup routine.
3262          */
3263         entry = map->hint;
3264         *out_entry = entry;
3265
3266         if ((entry == &map->header) ||
3267             (vaddr < entry->start) || (vaddr >= entry->end)) {
3268                 vm_map_entry_t tmp_entry;
3269
3270                 /*
3271                  * Entry was either not a valid hint, or the vaddr was not
3272                  * contained in the entry, so do a full lookup.
3273                  */
3274                 if (!vm_map_lookup_entry(map, vaddr, &tmp_entry)) {
3275                         rv = KERN_INVALID_ADDRESS;
3276                         goto done;
3277                 }
3278
3279                 entry = tmp_entry;
3280                 *out_entry = entry;
3281         }
3282         
3283         /*
3284          * Handle submaps.
3285          */
3286         if (entry->maptype == VM_MAPTYPE_SUBMAP) {
3287                 vm_map_t old_map = map;
3288
3289                 *var_map = map = entry->object.sub_map;
3290                 if (use_read_lock)
3291                         vm_map_unlock_read(old_map);
3292                 else
3293                         vm_map_unlock(old_map);
3294                 use_read_lock = 1;
3295                 goto RetryLookup;
3296         }
3297
3298         /*
3299          * Check whether this task is allowed to have this page.
3300          * Note the special case for MAP_ENTRY_COW
3301          * pages with an override.  This is to implement a forced
3302          * COW for debuggers.
3303          */
3304
3305         if (fault_type & VM_PROT_OVERRIDE_WRITE)
3306                 prot = entry->max_protection;
3307         else
3308                 prot = entry->protection;
3309
3310         fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
3311         if ((fault_type & prot) != fault_type) {
3312                 rv = KERN_PROTECTION_FAILURE;
3313                 goto done;
3314         }
3315
3316         if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
3317             (entry->eflags & MAP_ENTRY_COW) &&
3318             (fault_type & VM_PROT_WRITE) &&
3319             (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) {
3320                 rv = KERN_PROTECTION_FAILURE;
3321                 goto done;
3322         }
3323
3324         /*
3325          * If this page is not pageable, we have to get it for all possible
3326          * accesses.
3327          */
3328         *wired = (entry->wired_count != 0);
3329         if (*wired)
3330                 prot = fault_type = entry->protection;
3331
3332         /*
3333          * Virtual page tables may need to update the accessed (A) bit
3334          * in a page table entry.  Upgrade the fault to a write fault for
3335          * that case if the map will support it.  If the map does not support
3336          * it the page table entry simply will not be updated.
3337          */
3338         if (entry->maptype == VM_MAPTYPE_VPAGETABLE) {
3339                 if (prot & VM_PROT_WRITE)
3340                         fault_type |= VM_PROT_WRITE;
3341         }
3342
3343         /*
3344          * If the entry was copy-on-write, we either shadow it now (write) or demote the allowed access (read).
3345          */
3346         if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
3347                 /*
3348                  * If we want to write the page, we may as well handle that
3349                  * now since we've got the map locked.
3350                  *
3351                  * If we don't need to write the page, we just demote the
3352                  * permissions allowed.
3353                  */
3354
3355                 if (fault_type & VM_PROT_WRITE) {
3356                         /*
3357                          * Make a new object, and place it in the object
3358                          * chain.  Note that no new references have appeared
3359                          * -- one just moved from the map to the new
3360                          * object.
3361                          */
3362
3363                         if (use_read_lock && vm_map_lock_upgrade(map)) {
3364                                 use_read_lock = 0;
3365                                 goto RetryLookup;
3366                         }
3367                         use_read_lock = 0;
3368
3369                         vm_map_entry_shadow(entry);
3370                 } else {
3371                         /*
3372                          * We're attempting to read a copy-on-write page --
3373                          * don't allow writes.
3374                          */
3375
3376                         prot &= ~VM_PROT_WRITE;
3377                 }
3378         }
3379
3380         /*
3381          * Create an object if necessary.
3382          */
3383         if (entry->object.vm_object == NULL &&
3384             !map->system_map) {
3385                 if (use_read_lock && vm_map_lock_upgrade(map))  {
3386                         use_read_lock = 0;
3387                         goto RetryLookup;
3388                 }
3389                 use_read_lock = 0;
3390                 vm_map_entry_allocate_object(entry);
3391         }
3392
3393         /*
3394          * Return the object/offset from this entry.  If the entry was
3395          * copy-on-write or empty, it has been fixed up.
3396          */
3397
3398         *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
3399         *object = entry->object.vm_object;
3400
3401         /*
3402          * Return the computed protection to the caller.  On
3403          * success we return with a read lock held on the map.  On failure
3404          * we return with the map unlocked.
3405          */
3406         *out_prot = prot;
3407 done:
3408         if (rv == KERN_SUCCESS) {
3409                 if (use_read_lock == 0)
3410                         vm_map_lock_downgrade(map);
3411         } else if (use_read_lock) {
3412                 vm_map_unlock_read(map);
3413         } else {
3414                 vm_map_unlock(map);
3415         }
3416         return (rv);
3417 }
3418
3419 /*
3420  *      vm_map_lookup_done:
3421  *
3422  *      Releases locks acquired by a vm_map_lookup
3423  *      (according to the handle returned by that lookup).
3424  */
3425
3426 void
3427 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry, int count)
3428 {
3429         /*
3430          * Unlock the main-level map
3431          */
3432         vm_map_unlock_read(map);
3433         if (count)
3434                 vm_map_entry_release(count);
3435 }
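
/*
 * Usage sketch (editor's illustration of the lookup/done pairing; not
 * an actual consumer in this file).  Note that the map pointer is
 * IN/OUT and the returned map must be handed back to
 * vm_map_lookup_done():
 *
 *	vm_map_t map = &p->p_vmspace->vm_map;
 *	vm_map_entry_t entry;
 *	vm_object_t object;
 *	vm_pindex_t pindex;
 *	vm_prot_t prot;
 *	boolean_t wired;
 *
 *	if (vm_map_lookup(&map, va, VM_PROT_READ, &entry, &object,
 *			  &pindex, &prot, &wired) == KERN_SUCCESS) {
 *		(fault the page in from object/pindex here)
 *		vm_map_lookup_done(map, entry, 0);
 *	}
 */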
3436
3437 #include "opt_ddb.h"
3438 #ifdef DDB
3439 #include <sys/kernel.h>
3440
3441 #include <ddb/ddb.h>
3442
3443 /*
3444  *      vm_map_print:   [ debug ]
3445  */
3446 DB_SHOW_COMMAND(map, vm_map_print)
3447 {
3448         static int nlines;
3449         /* XXX convert args. */
3450         vm_map_t map = (vm_map_t)addr;
3451         boolean_t full = have_addr;
3452
3453         vm_map_entry_t entry;
3454
3455         db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
3456             (void *)map,
3457             (void *)map->pmap, map->nentries, map->timestamp);
3458         nlines++;
3459
3460         if (!full && db_indent)
3461                 return;
3462
3463         db_indent += 2;
3464         for (entry = map->header.next; entry != &map->header;
3465             entry = entry->next) {
3466                 db_iprintf("map entry %p: start=%p, end=%p\n",
3467                     (void *)entry, (void *)entry->start, (void *)entry->end);
3468                 nlines++;
3469                 {
3470                         static char *inheritance_name[4] =
3471                         {"share", "copy", "none", "donate_copy"};
3472
3473                         db_iprintf(" prot=%x/%x/%s",
3474                             entry->protection,
3475                             entry->max_protection,
3476                             inheritance_name[(int)(unsigned char)entry->inheritance]);
3477                         if (entry->wired_count != 0)
3478                                 db_printf(", wired");
3479                 }
3480                 if (entry->maptype == VM_MAPTYPE_SUBMAP) {
3481                         /* XXX no %qd in kernel.  Truncate entry->offset. */
3482                         db_printf(", share=%p, offset=0x%lx\n",
3483                             (void *)entry->object.sub_map,
3484                             (long)entry->offset);
3485                         nlines++;
3486                         if ((entry->prev == &map->header) ||
3487                             (entry->prev->object.sub_map !=
3488                                 entry->object.sub_map)) {
3489                                 db_indent += 2;
3490                                 vm_map_print((db_expr_t)(intptr_t)
3491                                              entry->object.sub_map,
3492                                              full, 0, (char *)0);
3493                                 db_indent -= 2;
3494                         }
3495                 } else {
3496                         /* XXX no %qd in kernel.  Truncate entry->offset. */
3497                         db_printf(", object=%p, offset=0x%lx",
3498                             (void *)entry->object.vm_object,
3499                             (long)entry->offset);
3500                         if (entry->eflags & MAP_ENTRY_COW)
3501                                 db_printf(", copy (%s)",
3502                                     (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
3503                         db_printf("\n");
3504                         nlines++;
3505
3506                         if ((entry->prev == &map->header) ||
3507                             (entry->prev->object.vm_object !=
3508                                 entry->object.vm_object)) {
3509                                 db_indent += 2;
3510                                 vm_object_print((db_expr_t)(intptr_t)
3511                                                 entry->object.vm_object,
3512                                                 full, 0, (char *)0);
3513                                 nlines += 4;
3514                                 db_indent -= 2;
3515                         }
3516                 }
3517         }
3518         db_indent -= 2;
3519         if (db_indent == 0)
3520                 nlines = 0;
3521 }
3522
3523
3524 DB_SHOW_COMMAND(procvm, procvm)
3525 {
3526         struct proc *p;
3527
3528         if (have_addr) {
3529                 p = (struct proc *) addr;
3530         } else {
3531                 p = curproc;
3532         }
3533
3534         db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
3535             (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
3536             (void *)vmspace_pmap(p->p_vmspace));
3537
3538         vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL);
3539 }
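
/*
 * Usage (from the ddb prompt; addresses are illustrative):
 *
 *	show map <vm_map address>	dump one map (full when addr given)
 *	show procvm			dump curproc's vmspace and map
 *
 * DB_SHOW_COMMAND(map, ...) and DB_SHOW_COMMAND(procvm, ...) register
 * these as "show" subcommands.
 */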
3540
3541 #endif /* DDB */