MAP_VPAGETABLE support part 3/3.
1 /*
2  * Copyright (c) 1991, 1993
3  *      The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * The Mach Operating System project at Carnegie-Mellon University.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *      This product includes software developed by the University of
19  *      California, Berkeley and its contributors.
20  * 4. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *      from: @(#)vm_map.c      8.3 (Berkeley) 1/12/94
37  *
38  *
39  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40  * All rights reserved.
41  *
42  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
43  *
44  * Permission to use, copy, modify and distribute this software and
45  * its documentation is hereby granted, provided that both the copyright
46  * notice and this permission notice appear in all copies of the
47  * software, derivative works or modified versions, and any portions
48  * thereof, and that both notices appear in supporting documentation.
49  *
50  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53  *
54  * Carnegie Mellon requests users of this software to return to
55  *
56  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
57  *  School of Computer Science
58  *  Carnegie Mellon University
59  *  Pittsburgh PA 15213-3890
60  *
61  * any improvements or extensions that they make and grant Carnegie the
62  * rights to redistribute these changes.
63  *
64  * $FreeBSD: src/sys/vm/vm_map.c,v 1.187.2.19 2003/05/27 00:47:02 alc Exp $
65  * $DragonFly: src/sys/vm/vm_map.c,v 1.49 2006/09/13 17:10:42 dillon Exp $
66  */
67
68 /*
69  *      Virtual memory mapping module.
70  */
71
72 #include <sys/param.h>
73 #include <sys/systm.h>
74 #include <sys/proc.h>
75 #include <sys/lock.h>
76 #include <sys/vmmeter.h>
77 #include <sys/mman.h>
78 #include <sys/vnode.h>
79 #include <sys/resourcevar.h>
80 #include <sys/shm.h>
81 #include <sys/tree.h>
82
83 #include <vm/vm.h>
84 #include <vm/vm_param.h>
85 #include <vm/pmap.h>
86 #include <vm/vm_map.h>
87 #include <vm/vm_page.h>
88 #include <vm/vm_object.h>
89 #include <vm/vm_pager.h>
90 #include <vm/vm_kern.h>
91 #include <vm/vm_extern.h>
92 #include <vm/swap_pager.h>
93 #include <vm/vm_zone.h>
94
95 #include <sys/thread2.h>
96
97 /*
98  *      Virtual memory maps provide for the mapping, protection,
99  *      and sharing of virtual memory objects.  In addition,
100  *      this module provides for an efficient virtual copy of
101  *      memory from one map to another.
102  *
103  *      Synchronization is required prior to most operations.
104  *
105  *      Maps consist of an ordered doubly-linked list of simple
106  *      entries; a single hint is used to speed up lookups.
107  *
108  *      Since portions of maps are specified by start/end addresses,
109  *      which may not align with existing map entries, all
110  *      routines merely "clip" entries to these start/end values.
111  *      [That is, an entry is split into two, bordering at a
112  *      start or end value.]  Note that these clippings may not
113  *      always be necessary (as the two resulting entries are then
114  *      not changed); however, the clipping is done for convenience.
115  *
116  *      As mentioned above, virtual copy operations are performed
117  *      by copying VM object references from one map to
118  *      another, and then marking both regions as copy-on-write.
119  */
120
121 /*
122  *      vm_map_startup:
123  *
124  *      Initialize the vm_map module.  Must be called before
125  *      any other vm_map routines.
126  *
127  *      Map and entry structures are allocated from the general
128  *      purpose memory pool with some exceptions:
129  *
130  *      - The kernel map and kmem submap are allocated statically.
131  *      - Kernel map entries are allocated out of a static pool.
132  *
133  *      These restrictions are necessary since malloc() uses the
134  *      maps and requires map entries.
135  */
136
137 #define VMEPERCPU       2
138
139 static struct vm_zone mapentzone_store, mapzone_store;
140 static vm_zone_t mapentzone, mapzone, vmspace_zone;
141 static struct vm_object mapentobj, mapobj;
142
143 static struct vm_map_entry map_entry_init[MAX_MAPENT];
144 static struct vm_map_entry cpu_map_entry_init[MAXCPU][VMEPERCPU];
145 static struct vm_map map_init[MAX_KMAP];
146
147 static vm_map_entry_t vm_map_entry_create(vm_map_t map, int *);
148 static void vm_map_entry_dispose (vm_map_t map, vm_map_entry_t entry, int *);
149 static void _vm_map_clip_end (vm_map_t, vm_map_entry_t, vm_offset_t, int *);
150 static void _vm_map_clip_start (vm_map_t, vm_map_entry_t, vm_offset_t, int *);
151 static void vm_map_entry_delete (vm_map_t, vm_map_entry_t, int *);
152 static void vm_map_entry_unwire (vm_map_t, vm_map_entry_t);
153 static void vm_map_copy_entry (vm_map_t, vm_map_t, vm_map_entry_t,
154                 vm_map_entry_t);
155 static void vm_map_split (vm_map_entry_t);
156 static void vm_map_unclip_range (vm_map_t map, vm_map_entry_t start_entry, vm_offset_t start, vm_offset_t end, int *count, int flags);
157
158 void
159 vm_map_startup(void)
160 {
161         mapzone = &mapzone_store;
162         zbootinit(mapzone, "MAP", sizeof (struct vm_map),
163                 map_init, MAX_KMAP);
164         mapentzone = &mapentzone_store;
165         zbootinit(mapentzone, "MAP ENTRY", sizeof (struct vm_map_entry),
166                 map_entry_init, MAX_MAPENT);
167 }
168
169 /*
170  * Red black tree functions
171  */
172 static int rb_vm_map_compare(vm_map_entry_t a, vm_map_entry_t b);
173 RB_GENERATE(vm_map_rb_tree, vm_map_entry, rb_entry, rb_vm_map_compare);
174
175 /* a->start is the address; it is the only field that must be initialized */
176 static int
177 rb_vm_map_compare(vm_map_entry_t a, vm_map_entry_t b)
178 {
179         if (a->start < b->start)
180                 return(-1);
181         else if (a->start > b->start)
182                 return(1);
183         return(0);
184 }
185
186 /*
187  * Allocate a vmspace structure, including a vm_map and pmap,
188  * and initialize those structures.  The refcnt is set to 1.
189  * The remaining fields must be initialized by the caller.
190  */
191 struct vmspace *
192 vmspace_alloc(vm_offset_t min, vm_offset_t max)
193 {
194         struct vmspace *vm;
195
196         vm = zalloc(vmspace_zone);
197         vm_map_init(&vm->vm_map, min, max);
198         pmap_pinit(vmspace_pmap(vm));
199         vm->vm_map.pmap = vmspace_pmap(vm);             /* XXX */
200         vm->vm_refcnt = 1;
201         vm->vm_shm = NULL;
202         vm->vm_exitingcnt = 0;
203         return (vm);
204 }
205
206 void
207 vm_init2(void) 
208 {
209         zinitna(mapentzone, &mapentobj, NULL, 0, 0, 
210                 ZONE_USE_RESERVE | ZONE_SPECIAL, 1);
211         zinitna(mapzone, &mapobj, NULL, 0, 0, 0, 1);
212         vmspace_zone = zinit("VMSPACE", sizeof (struct vmspace), 0, 0, 3);
213         pmap_init2();
214         vm_object_init2();
215 }
216
217 static __inline void
218 vmspace_dofree(struct vmspace *vm)
219 {
220         int count;
221
222         /*
223          * Make sure any SysV shm is freed; it might not have been
224          * freed in exit1().
225          */
226         shmexit(vm);
227
228         KKASSERT(vm->vm_upcalls == NULL);
229
230         /*
231          * Lock the map, to wait out all other references to it.
232          * Delete all of the mappings and pages they hold, then call
233          * the pmap module to reclaim anything left.
234          */
235         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
236         vm_map_lock(&vm->vm_map);
237         vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
238                 vm->vm_map.max_offset, &count);
239         vm_map_unlock(&vm->vm_map);
240         vm_map_entry_release(count);
241
242         pmap_release(vmspace_pmap(vm));
243         zfree(vmspace_zone, vm);
244 }
245
246 void
247 vmspace_free(struct vmspace *vm)
248 {
249         if (vm->vm_refcnt == 0)
250                 panic("vmspace_free: attempt to free already freed vmspace");
251
252         if (--vm->vm_refcnt == 0 && vm->vm_exitingcnt == 0)
253                 vmspace_dofree(vm);
254 }
255
256 void
257 vmspace_exitfree(struct proc *p)
258 {
259         struct vmspace *vm;
260
261         vm = p->p_vmspace;
262         p->p_vmspace = NULL;
263
264         /*
265          * cleanup by parent process wait()ing on exiting child.  vm_refcnt
266          * may not be 0 (e.g. fork() and child exits without exec()ing).
267          * exitingcnt may increment above 0 and drop back down to zero
268          * several times while vm_refcnt is held non-zero.  vm_refcnt
269          * may also increment above 0 and drop back down to zero several
270          * times while vm_exitingcnt is held non-zero.
271          *
272          * The last wait on the exiting child's vmspace will clean up
273          * the remainder of the vmspace.
274          */
275         if (--vm->vm_exitingcnt == 0 && vm->vm_refcnt == 0)
276                 vmspace_dofree(vm);
277 }
278
279 /*
280  * vmspace_swap_count() - count the approximate swap usage in pages for a
281  *                        vmspace.
282  *
283  *      Swap usage is determined by taking the proportional swap used by
284  *      VM objects backing the VM map.  To make up for fractional losses,
285  *      if the VM object has any swap use at all the associated map entries
286  *      count for at least 1 swap page.
287  */
288 int
289 vmspace_swap_count(struct vmspace *vmspace)
290 {
291         vm_map_t map = &vmspace->vm_map;
292         vm_map_entry_t cur;
293         vm_object_t object;
294         int count = 0;
295         int n;
296
297         for (cur = map->header.next; cur != &map->header; cur = cur->next) {
298                 switch(cur->maptype) {
299                 case VM_MAPTYPE_NORMAL:
300                 case VM_MAPTYPE_VPAGETABLE:
301                         if ((object = cur->object.vm_object) == NULL)
302                                 break;
303                         if (object->type != OBJT_SWAP)
304                                 break;
305                         n = (cur->end - cur->start) / PAGE_SIZE;
306                         if (object->un_pager.swp.swp_bcount) {
307                                 count += object->un_pager.swp.swp_bcount *
308                                     SWAP_META_PAGES * n / object->size + 1;
309                         }
310                         break;
311                 default:
312                         break;
313                 }
314         }
315         return(count);
316 }
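/*
 * Illustrative sketch (not part of the original source): a worked example
 * of the proportional swap calculation above, using made-up numbers and
 * assuming SWAP_META_PAGES is 16.  If an OBJT_SWAP object of size 1024
 * pages has swp_bcount == 4 and the map entry covers n = 256 pages of it:
 *
 *	count += 4 * 16 * 256 / 1024 + 1;	(16 + 1 = 17 pages)
 *
 * The trailing "+ 1" ensures any entry backed by an object with non-zero
 * swap use is charged at least one swap page.
 */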
317
318
319 /*
320  *      vm_map_create:
321  *
322  *      Creates and returns a new empty VM map with
323  *      the given physical map structure, and having
324  *      the given lower and upper address bounds.
325  */
326 vm_map_t
327 vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
328 {
329         vm_map_t result;
330
331         result = zalloc(mapzone);
332         vm_map_init(result, min, max);
333         result->pmap = pmap;
334         return (result);
335 }
336
337 /*
338  * Initialize an existing vm_map structure
339  * such as that in the vmspace structure.
340  * The pmap is set elsewhere.
341  */
342 void
343 vm_map_init(struct vm_map *map, vm_offset_t min, vm_offset_t max)
344 {
345         map->header.next = map->header.prev = &map->header;
346         RB_INIT(&map->rb_root);
347         map->nentries = 0;
348         map->size = 0;
349         map->system_map = 0;
350         map->infork = 0;
351         map->min_offset = min;
352         map->max_offset = max;
353         map->first_free = &map->header;
354         map->hint = &map->header;
355         map->timestamp = 0;
356         lockinit(&map->lock, "thrd_sleep", 0, 0);
357 }
358
359 /*
360  *      vm_map_entry_reserve_cpu_init:
361  *
362  *      Set an initial negative count so the first attempt to reserve
363  *      space preloads a bunch of vm_map_entry's for this cpu.  Also
364  *      pre-allocate 2 vm_map_entries which will be needed by zalloc() to
365  *      map a new page for vm_map_entry structures.  SMP systems are
366  *      particularly sensitive.
367  *
368  *      This routine is called in early boot so we cannot just call
369  *      vm_map_entry_reserve().
370  *
371  *      May be called for a gd other than mycpu, but may only be called
372  *      during early boot.
373  */
374 void
375 vm_map_entry_reserve_cpu_init(globaldata_t gd)
376 {
377         vm_map_entry_t entry;
378         int i;
379
380         gd->gd_vme_avail -= MAP_RESERVE_COUNT * 2;
381         entry = &cpu_map_entry_init[gd->gd_cpuid][0];
382         for (i = 0; i < VMEPERCPU; ++i, ++entry) {
383                 entry->next = gd->gd_vme_base;
384                 gd->gd_vme_base = entry;
385         }
386 }
387
388 /*
389  *      vm_map_entry_reserve:
390  *
391  *      Reserves vm_map_entry structures so code later on can manipulate
392  *      map_entry structures within a locked map without blocking trying
393  *      to allocate a new vm_map_entry.
394  */
395 int
396 vm_map_entry_reserve(int count)
397 {
398         struct globaldata *gd = mycpu;
399         vm_map_entry_t entry;
400
401         crit_enter();
402
403         /*
404          * Make sure we have enough structures in gd_vme_base to handle
405          * the reservation request.
406          */
407         while (gd->gd_vme_avail < count) {
408                 entry = zalloc(mapentzone);
409                 entry->next = gd->gd_vme_base;
410                 gd->gd_vme_base = entry;
411                 ++gd->gd_vme_avail;
412         }
413         gd->gd_vme_avail -= count;
414         crit_exit();
415         return(count);
416 }
417
418 /*
419  *      vm_map_entry_release:
420  *
421  *      Releases previously reserved vm_map_entry structures that were not
422  *      used.  If we have too much junk in our per-cpu cache clean some of
423  *      it out.
424  */
425 void
426 vm_map_entry_release(int count)
427 {
428         struct globaldata *gd = mycpu;
429         vm_map_entry_t entry;
430
431         crit_enter();
432         gd->gd_vme_avail += count;
433         while (gd->gd_vme_avail > MAP_RESERVE_SLOP) {
434                 entry = gd->gd_vme_base;
435                 KKASSERT(entry != NULL);
436                 gd->gd_vme_base = entry->next;
437                 --gd->gd_vme_avail;
438                 crit_exit();
439                 zfree(mapentzone, entry);
440                 crit_enter();
441         }
442         crit_exit();
443 }
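/*
 * Illustrative sketch (not part of the original source): the typical
 * reserve/release bracket used by callers throughout this file (see
 * vmspace_dofree() above and vm_map_find() below); 'map' here is a
 * hypothetical map the caller already has locked access to.
 *
 *	int count;
 *
 *	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
 *	vm_map_lock(map);
 *	... manipulate map entries without blocking in zalloc ...
 *	vm_map_unlock(map);
 *	vm_map_entry_release(count);
 */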
444
445 /*
446  *      vm_map_entry_kreserve:
447  *
448  *      Reserve map entry structures for use in kernel_map itself.  These
449  *      entries have *ALREADY* been reserved on a per-cpu basis when the map
450  *      was inited.  This function is used by zalloc() to avoid a recursion
451  *      when zalloc() itself needs to allocate additional kernel memory.
452  *
453  *      This function works like the normal reserve but does not load the
454  *      vm_map_entry cache (because that would result in an infinite
455  *      recursion).  Note that gd_vme_avail may go negative.  This is expected.
456  *
457  *      Any caller of this function must be sure to renormalize after 
458  *      potentially eating entries to ensure that the reserve supply
459  *      remains intact.
460  */
461 int
462 vm_map_entry_kreserve(int count)
463 {
464         struct globaldata *gd = mycpu;
465
466         crit_enter();
467         gd->gd_vme_avail -= count;
468         crit_exit();
469         KASSERT(gd->gd_vme_base != NULL, ("no reserved entries left, gd_vme_avail = %d\n", gd->gd_vme_avail));
470         return(count);
471 }
472
473 /*
474  *      vm_map_entry_krelease:
475  *
476  *      Release previously reserved map entries for kernel_map.  We do not
477  *      attempt to clean up like the normal release function as this would
478  *      cause an unnecessary (but probably not fatal) deep procedure call.
479  */
480 void
481 vm_map_entry_krelease(int count)
482 {
483         struct globaldata *gd = mycpu;
484
485         crit_enter();
486         gd->gd_vme_avail += count;
487         crit_exit();
488 }
489
490 /*
491  *      vm_map_entry_create:    [ internal use only ]
492  *
493  *      Allocates a VM map entry for insertion.  No entry fields are filled 
494  *      in.
495  *
496  *      This routine may be called from an interrupt thread but not a FAST
497  *      interrupt.  This routine may recurse the map lock.
498  */
499 static vm_map_entry_t
500 vm_map_entry_create(vm_map_t map, int *countp)
501 {
502         struct globaldata *gd = mycpu;
503         vm_map_entry_t entry;
504
505         KKASSERT(*countp > 0);
506         --*countp;
507         crit_enter();
508         entry = gd->gd_vme_base;
509         KASSERT(entry != NULL, ("gd_vme_base NULL! count %d", *countp));
510         gd->gd_vme_base = entry->next;
511         crit_exit();
512         return(entry);
513 }
514
515 /*
516  *      vm_map_entry_dispose:   [ internal use only ]
517  *
518  *      Dispose of a vm_map_entry that is no longer being referenced.  This
519  *      function may be called from an interrupt.
520  */
521 static void
522 vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry, int *countp)
523 {
524         struct globaldata *gd = mycpu;
525
526         KKASSERT(map->hint != entry);
527         KKASSERT(map->first_free != entry);
528
529         ++*countp;
530         crit_enter();
531         entry->next = gd->gd_vme_base;
532         gd->gd_vme_base = entry;
533         crit_exit();
534 }
535
536
537 /*
538  *      vm_map_entry_{un,}link:
539  *
540  *      Insert/remove entries from maps.
541  */
542 static __inline void
543 vm_map_entry_link(vm_map_t map,
544                   vm_map_entry_t after_where,
545                   vm_map_entry_t entry)
546 {
547         map->nentries++;
548         entry->prev = after_where;
549         entry->next = after_where->next;
550         entry->next->prev = entry;
551         after_where->next = entry;
552         if (vm_map_rb_tree_RB_INSERT(&map->rb_root, entry))
553                 panic("vm_map_entry_link: dup addr map %p ent %p", map, entry);
554 }
555
556 static __inline void
557 vm_map_entry_unlink(vm_map_t map,
558                     vm_map_entry_t entry)
559 {
560         vm_map_entry_t prev;
561         vm_map_entry_t next;
562
563         if (entry->eflags & MAP_ENTRY_IN_TRANSITION)
564                 panic("vm_map_entry_unlink: attempt to mess with locked entry! %p", entry);
565         prev = entry->prev;
566         next = entry->next;
567         next->prev = prev;
568         prev->next = next;
569         vm_map_rb_tree_RB_REMOVE(&map->rb_root, entry);
570         map->nentries--;
571 }
572
573 /*
574  *      vm_map_lookup_entry:    [ internal use only ]
575  *
576  *      Finds the map entry containing (or
577  *      immediately preceding) the specified address
578  *      in the given map; the entry is returned
579  *      in the "entry" parameter.  The boolean
580  *      result indicates whether the address is
581  *      actually contained in the map.
582  */
583 boolean_t
584 vm_map_lookup_entry(vm_map_t map, vm_offset_t address,
585     vm_map_entry_t *entry /* OUT */)
586 {
587         vm_map_entry_t tmp;
588         vm_map_entry_t last;
589
590 #if 0
591         /*
592          * XXX TEMPORARILY DISABLED.  For some reason our attempt to revive
593          * the hint code with the red-black lookup meets with system crashes
594          * and lockups.  We do not yet know why.
595          *
596          * It is possible that the problem is related to the setting
597          * of the hint during map_entry deletion, in the code specified
598          * at the GGG comment later on in this file.
599          */
600         /*
601          * Quickly check the cached hint, there's a good chance of a match.
602          */
603         if (map->hint != &map->header) {
604                 tmp = map->hint;
605                 if (address >= tmp->start && address < tmp->end) {
606                         *entry = tmp;
607                         return(TRUE);
608                 }
609         }
610 #endif
611
612         /*
613          * Locate the record from the top of the tree.  'last' tracks the
614          * closest prior record and is returned if no match is found, which
615          * in binary tree terms means tracking the most recent right-branch
616          * taken.  If there is no prior record, &map->header is returned.
617          */
618         last = &map->header;
619         tmp = RB_ROOT(&map->rb_root);
620
621         while (tmp) {
622                 if (address >= tmp->start) {
623                         if (address < tmp->end) {
624                                 *entry = tmp;
625                                 map->hint = tmp;
626                                 return(TRUE);
627                         }
628                         last = tmp;
629                         tmp = RB_RIGHT(tmp, rb_entry);
630                 } else {
631                         tmp = RB_LEFT(tmp, rb_entry);
632                 }
633         }
634         *entry = last;
635         return (FALSE);
636 }
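/*
 * Illustrative sketch (not part of the original source): how callers in
 * this file typically consume the boolean result.  On TRUE the entry
 * containing the address is clipped; on FALSE the predecessor was
 * returned and the caller steps forward to its successor:
 *
 *	if (vm_map_lookup_entry(map, start, &entry)) {
 *		vm_map_clip_start(map, entry, start, &count);
 *	} else {
 *		entry = entry->next;
 *	}
 */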
637
638 /*
639  *      vm_map_insert:
640  *
641  *      Inserts the given whole VM object into the target
642  *      map at the specified address range.  The object's
643  *      size should match that of the address range.
644  *
645  *      Requires that the map be locked, and leaves it so.  Requires that
646  *      sufficient vm_map_entry structures have been reserved and tracks
647  *      the use via countp.
648  *
649  *      If object is non-NULL, ref count must be bumped by caller
650  *      prior to making call to account for the new entry.
651  */
652 int
653 vm_map_insert(vm_map_t map, int *countp,
654               vm_object_t object, vm_ooffset_t offset,
655               vm_offset_t start, vm_offset_t end,
656               vm_maptype_t maptype,
657               vm_prot_t prot, vm_prot_t max,
658               int cow)
659 {
660         vm_map_entry_t new_entry;
661         vm_map_entry_t prev_entry;
662         vm_map_entry_t temp_entry;
663         vm_eflags_t protoeflags;
664
665         /*
666          * Check that the start and end points are not bogus.
667          */
668
669         if ((start < map->min_offset) || (end > map->max_offset) ||
670             (start >= end))
671                 return (KERN_INVALID_ADDRESS);
672
673         /*
674          * Find the entry prior to the proposed starting address; if it's part
675          * of an existing entry, this range is bogus.
676          */
677
678         if (vm_map_lookup_entry(map, start, &temp_entry))
679                 return (KERN_NO_SPACE);
680
681         prev_entry = temp_entry;
682
683         /*
684          * Assert that the next entry doesn't overlap the end point.
685          */
686
687         if ((prev_entry->next != &map->header) &&
688             (prev_entry->next->start < end))
689                 return (KERN_NO_SPACE);
690
691         protoeflags = 0;
692
693         if (cow & MAP_COPY_ON_WRITE)
694                 protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY;
695
696         if (cow & MAP_NOFAULT) {
697                 protoeflags |= MAP_ENTRY_NOFAULT;
698
699                 KASSERT(object == NULL,
700                         ("vm_map_insert: paradoxical MAP_NOFAULT request"));
701         }
702         if (cow & MAP_DISABLE_SYNCER)
703                 protoeflags |= MAP_ENTRY_NOSYNC;
704         if (cow & MAP_DISABLE_COREDUMP)
705                 protoeflags |= MAP_ENTRY_NOCOREDUMP;
706
707         if (object) {
708                 /*
709                  * When object is non-NULL, it could be shared with another
710                  * process.  We have to set or clear OBJ_ONEMAPPING 
711                  * appropriately.
712                  */
713                 if ((object->ref_count > 1) || (object->shadow_count != 0)) {
714                         vm_object_clear_flag(object, OBJ_ONEMAPPING);
715                 }
716         }
717         else if ((prev_entry != &map->header) &&
718                  (prev_entry->eflags == protoeflags) &&
719                  (prev_entry->end == start) &&
720                  (prev_entry->wired_count == 0) &&
721                  prev_entry->maptype == maptype &&
722                  ((prev_entry->object.vm_object == NULL) ||
723                   vm_object_coalesce(prev_entry->object.vm_object,
724                                      OFF_TO_IDX(prev_entry->offset),
725                                      (vm_size_t)(prev_entry->end - prev_entry->start),
726                                      (vm_size_t)(end - prev_entry->end)))) {
727                 /*
728                  * We were able to extend the object.  Determine if we
729                  * can extend the previous map entry to include the 
730                  * new range as well.
731                  */
732                 if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
733                     (prev_entry->protection == prot) &&
734                     (prev_entry->max_protection == max)) {
735                         map->size += (end - prev_entry->end);
736                         prev_entry->end = end;
737                         vm_map_simplify_entry(map, prev_entry, countp);
738                         return (KERN_SUCCESS);
739                 }
740
741                 /*
742                  * If we can extend the object but cannot extend the
743                  * map entry, we have to create a new map entry.  We
744                  * must bump the ref count on the extended object to
745                  * account for it.  object may be NULL.
746                  */
747                 object = prev_entry->object.vm_object;
748                 offset = prev_entry->offset +
749                         (prev_entry->end - prev_entry->start);
750                 vm_object_reference(object);
751         }
752
753         /*
754          * NOTE: if conditionals fail, object can be NULL here.  This occurs
755          * in things like the buffer map where we manage kva but do not manage
756          * backing objects.
757          */
758
759         /*
760          * Create a new entry
761          */
762
763         new_entry = vm_map_entry_create(map, countp);
764         new_entry->start = start;
765         new_entry->end = end;
766
767         new_entry->maptype = maptype;
768         new_entry->eflags = protoeflags;
769         new_entry->object.vm_object = object;
770         new_entry->offset = offset;
771         new_entry->aux.master_pde = 0;
772
773         new_entry->inheritance = VM_INHERIT_DEFAULT;
774         new_entry->protection = prot;
775         new_entry->max_protection = max;
776         new_entry->wired_count = 0;
777
778         /*
779          * Insert the new entry into the list
780          */
781
782         vm_map_entry_link(map, prev_entry, new_entry);
783         map->size += new_entry->end - new_entry->start;
784
785         /*
786          * Update the free space hint
787          */
788         if ((map->first_free == prev_entry) &&
789             (prev_entry->end >= new_entry->start)) {
790                 map->first_free = new_entry;
791         }
792
793 #if 0
794         /*
795          * Temporarily removed to avoid MAP_STACK panic, due to
796          * MAP_STACK being a huge hack.  Will be added back in
797          * when MAP_STACK (and the user stack mapping) is fixed.
798          */
799         /*
800          * It may be possible to simplify the entry
801          */
802         vm_map_simplify_entry(map, new_entry, countp);
803 #endif
804
805         /*
806          * Try to pre-populate the page table.  Mappings governed by virtual
807          * page tables cannot be prepopulated without a lot of work, so
808          * don't try.
809          */
810         if ((cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) &&
811             maptype != VM_MAPTYPE_VPAGETABLE) {
812                 pmap_object_init_pt(map->pmap, start, prot,
813                                     object, OFF_TO_IDX(offset), end - start,
814                                     cow & MAP_PREFAULT_PARTIAL);
815         }
816
817         return (KERN_SUCCESS);
818 }
819
820 /*
821  * Find sufficient space for `length' bytes in the given map, starting at
822  * `start'.  The map must be locked.  Returns 0 on success, 1 on no space.
823  *
824  * This function will return an arbitrarily aligned pointer.  If no
825  * particular alignment is required you should pass align as 1.  Note that
826  * the map may return PAGE_SIZE aligned pointers if all the lengths used in
827  * the map are a multiple of PAGE_SIZE, even if you pass a smaller align
828  * argument.
829  *
830  * 'align' should be a power of 2 but is not required to be.
831  */
832 int
833 vm_map_findspace(
834         vm_map_t map,
835         vm_offset_t start,
836         vm_size_t length,
837         vm_offset_t align,
838         vm_offset_t *addr)
839 {
840         vm_map_entry_t entry, next;
841         vm_offset_t end;
842         vm_offset_t align_mask;
843
844         if (start < map->min_offset)
845                 start = map->min_offset;
846         if (start > map->max_offset)
847                 return (1);
848
849         /*
850          * If the alignment is not a power of 2 we will have to use
851          * a mod/division, set align_mask to a special value.
852          */
853         if ((align | (align - 1)) + 1 != (align << 1))
854                 align_mask = (vm_offset_t)-1;
855         else
856                 align_mask = align - 1;
857
858 retry:
859         /*
860          * Look for the first possible address; if there's already something
861          * at this address, we have to start after it.
862          */
863         if (start == map->min_offset) {
864                 if ((entry = map->first_free) != &map->header)
865                         start = entry->end;
866         } else {
867                 vm_map_entry_t tmp;
868
869                 if (vm_map_lookup_entry(map, start, &tmp))
870                         start = tmp->end;
871                 entry = tmp;
872         }
873
874         /*
875          * Look through the rest of the map, trying to fit a new region in the
876          * gap between existing regions, or after the very last region.
877          */
878         for (;; start = (entry = next)->end) {
879                 /*
880                  * Adjust the proposed start by the requested alignment,
881                  * Adjust the proposed start by the requested alignment, and
882                  */
883                 if (align_mask == (vm_offset_t)-1)
884                         end = ((start + align - 1) / align) * align;
885                 else
886                         end = (start + align_mask) & ~align_mask;
887                 if (end < start)
888                         return (1);
889                 start = end;
890                 /*
891                  * Find the end of the proposed new region.  Be sure we didn't
892                  * go beyond the end of the map, or wrap around the address.
893                  * Then check to see if this is the last entry or if the 
894                  * proposed end fits in the gap between this and the next
895                  * entry.
896                  */
897                 end = start + length;
898                 if (end > map->max_offset || end < start)
899                         return (1);
900                 next = entry->next;
901                 if (next == &map->header || next->start >= end)
902                         break;
903         }
904         map->hint = entry;
905         if (map == kernel_map) {
906                 vm_offset_t ksize;
907                 if ((ksize = round_page(start + length)) > kernel_vm_end) {
908                         pmap_growkernel(ksize);
909                         goto retry;
910                 }
911         }
912         *addr = start;
913         return (0);
914 }
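/*
 * Illustrative sketch (not part of the original source): the power-of-2
 * test used above.  For align = 8, (align | (align - 1)) + 1 is
 * (8 | 7) + 1 = 16, which equals align << 1, so align_mask becomes 7 and
 * the cheap mask expression is used.  For align = 12, (12 | 11) + 1 = 16
 * but 12 << 1 = 24, so the mod/division expression is used instead:
 *
 *	end = ((start + align - 1) / align) * align;	(general case)
 *	end = (start + align_mask) & ~align_mask;	(power-of-2 case)
 */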
915
916 /*
917  *      vm_map_find finds an unallocated region in the target address
918  *      map with the given length.  The search is defined to be
919  *      first-fit from the specified address; the region found is
920  *      returned in the same parameter.
921  *
922  *      If object is non-NULL, ref count must be bumped by caller
923  *      prior to making call to account for the new entry.
924  */
925 int
926 vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
927             vm_offset_t *addr,  vm_size_t length,
928             boolean_t find_space,
929             vm_maptype_t maptype,
930             vm_prot_t prot, vm_prot_t max,
931             int cow)
932 {
933         vm_offset_t start;
934         int result;
935         int count;
936
937         start = *addr;
938
939         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
940         vm_map_lock(map);
941         if (find_space) {
942                 if (vm_map_findspace(map, start, length, 1, addr)) {
943                         vm_map_unlock(map);
944                         vm_map_entry_release(count);
945                         return (KERN_NO_SPACE);
946                 }
947                 start = *addr;
948         }
949         result = vm_map_insert(map, &count, object, offset,
950                                start, start + length,
951                                maptype,
952                                prot, max,
953                                cow);
954         vm_map_unlock(map);
955         vm_map_entry_release(count);
956
957         return (result);
958 }
959
960 /*
961  *      vm_map_simplify_entry:
962  *
963  *      Simplify the given map entry by merging with either neighbor.  This
964  *      routine also has the ability to merge with both neighbors.
965  *
966  *      The map must be locked.
967  *
968  *      This routine guarantees that the passed entry remains valid (though
969  *      possibly extended).  When merging, this routine may delete one or
970  *      both neighbors.  No action is taken on entries which have their
971  *      in-transition flag set.
972  */
973 void
974 vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry, int *countp)
975 {
976         vm_map_entry_t next, prev;
977         vm_size_t prevsize, esize;
978
979         if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
980                 ++mycpu->gd_cnt.v_intrans_coll;
981                 return;
982         }
983
984         if (entry->maptype == VM_MAPTYPE_SUBMAP)
985                 return;
986
987         prev = entry->prev;
988         if (prev != &map->header) {
989                 prevsize = prev->end - prev->start;
990                 if ( (prev->end == entry->start) &&
991                      (prev->maptype == entry->maptype) &&
992                      (prev->object.vm_object == entry->object.vm_object) &&
993                      (!prev->object.vm_object ||
994                         (prev->offset + prevsize == entry->offset)) &&
995                      (prev->eflags == entry->eflags) &&
996                      (prev->protection == entry->protection) &&
997                      (prev->max_protection == entry->max_protection) &&
998                      (prev->inheritance == entry->inheritance) &&
999                      (prev->wired_count == entry->wired_count)) {
1000                         if (map->first_free == prev)
1001                                 map->first_free = entry;
1002                         if (map->hint == prev)
1003                                 map->hint = entry;
1004                         vm_map_entry_unlink(map, prev);
1005                         entry->start = prev->start;
1006                         entry->offset = prev->offset;
1007                         if (prev->object.vm_object)
1008                                 vm_object_deallocate(prev->object.vm_object);
1009                         vm_map_entry_dispose(map, prev, countp);
1010                 }
1011         }
1012
1013         next = entry->next;
1014         if (next != &map->header) {
1015                 esize = entry->end - entry->start;
1016                 if ((entry->end == next->start) &&
1017                     (next->maptype == entry->maptype) &&
1018                     (next->object.vm_object == entry->object.vm_object) &&
1019                      (!entry->object.vm_object ||
1020                         (entry->offset + esize == next->offset)) &&
1021                     (next->eflags == entry->eflags) &&
1022                     (next->protection == entry->protection) &&
1023                     (next->max_protection == entry->max_protection) &&
1024                     (next->inheritance == entry->inheritance) &&
1025                     (next->wired_count == entry->wired_count)) {
1026                         if (map->first_free == next)
1027                                 map->first_free = entry;
1028                         if (map->hint == next)
1029                                 map->hint = entry;
1030                         vm_map_entry_unlink(map, next);
1031                         entry->end = next->end;
1032                         if (next->object.vm_object)
1033                                 vm_object_deallocate(next->object.vm_object);
1034                         vm_map_entry_dispose(map, next, countp);
1035                 }
1036         }
1037 }
1038 /*
1039  *      vm_map_clip_start:      [ internal use only ]
1040  *
1041  *      Asserts that the given entry begins at or after
1042  *      the specified address; if necessary,
1043  *      it splits the entry into two.
1044  */
1045 #define vm_map_clip_start(map, entry, startaddr, countp) \
1046 { \
1047         if (startaddr > entry->start) \
1048                 _vm_map_clip_start(map, entry, startaddr, countp); \
1049 }
1050
1051 /*
1052  *      This routine is called only when it is known that
1053  *      the entry must be split.
1054  */
1055 static void
1056 _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start, int *countp)
1057 {
1058         vm_map_entry_t new_entry;
1059
1060         /*
1061          * Split off the front portion -- note that we must insert the new
1062          * entry BEFORE this one, so that this entry has the specified
1063          * starting address.
1064          */
1065
1066         vm_map_simplify_entry(map, entry, countp);
1067
1068         /*
1069          * If there is no object backing this entry, we might as well create
1070          * one now.  If we defer it, an object can get created after the map
1071          * is clipped, and individual objects will be created for the split-up
1072          * map.  This is a bit of a hack, but is also about the best place to
1073          * put this improvement.
1074          */
1075
1076         if (entry->object.vm_object == NULL && !map->system_map) {
1077                 vm_object_t object;
1078                 object = vm_object_allocate(OBJT_DEFAULT,
1079                                 atop(entry->end - entry->start));
1080                 entry->object.vm_object = object;
1081                 entry->offset = 0;
1082         }
1083
1084         new_entry = vm_map_entry_create(map, countp);
1085         *new_entry = *entry;
1086
1087         new_entry->end = start;
1088         entry->offset += (start - entry->start);
1089         entry->start = start;
1090
1091         vm_map_entry_link(map, entry->prev, new_entry);
1092
1093         switch(entry->maptype) {
1094         case VM_MAPTYPE_NORMAL:
1095         case VM_MAPTYPE_VPAGETABLE:
1096                 vm_object_reference(new_entry->object.vm_object);
1097                 break;
1098         default:
1099                 break;
1100         }
1101 }
1102
1103 /*
1104  *      vm_map_clip_end:        [ internal use only ]
1105  *
1106  *      Asserts that the given entry ends at or before
1107  *      the specified address; if necessary,
1108  *      it splits the entry into two.
1109  */
1110
1111 #define vm_map_clip_end(map, entry, endaddr, countp) \
1112 { \
1113         if (endaddr < entry->end) \
1114                 _vm_map_clip_end(map, entry, endaddr, countp); \
1115 }
1116
1117 /*
1118  *      This routine is called only when it is known that
1119  *      the entry must be split.
1120  */
1121 static void
1122 _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end, int *countp)
1123 {
1124         vm_map_entry_t new_entry;
1125
1126         /*
1127          * If there is no object backing this entry, we might as well create
1128          * one now.  If we defer it, an object can get created after the map
1129          * is clipped, and individual objects will be created for the split-up
1130          * map.  This is a bit of a hack, but is also about the best place to
1131          * put this improvement.
1132          */
1133
1134         if (entry->object.vm_object == NULL && !map->system_map) {
1135                 vm_object_t object;
1136                 object = vm_object_allocate(OBJT_DEFAULT,
1137                                 atop(entry->end - entry->start));
1138                 entry->object.vm_object = object;
1139                 entry->offset = 0;
1140         }
1141
1142         /*
1143          * Create a new entry and insert it AFTER the specified entry
1144          */
1145
1146         new_entry = vm_map_entry_create(map, countp);
1147         *new_entry = *entry;
1148
1149         new_entry->start = entry->end = end;
1150         new_entry->offset += (end - entry->start);
1151
1152         vm_map_entry_link(map, entry, new_entry);
1153
1154         switch(entry->maptype) {
1155         case VM_MAPTYPE_NORMAL:
1156         case VM_MAPTYPE_VPAGETABLE:
1157                 vm_object_reference(new_entry->object.vm_object);
1158                 break;
1159         default:
1160                 break;
1161         }
1162 }
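/*
 * Illustrative sketch (not part of the original source): the effect of
 * the clip macros on a hypothetical entry covering [0x1000, 0x5000):
 *
 *	vm_map_clip_start(map, entry, 0x2000, &count);
 *		splits it into [0x1000, 0x2000) and [0x2000, 0x5000);
 *		'entry' now describes [0x2000, 0x5000).
 *	vm_map_clip_end(map, entry, 0x4000, &count);
 *		splits that into [0x2000, 0x4000) and [0x4000, 0x5000);
 *		'entry' now describes [0x2000, 0x4000).
 *
 * The pieces share the same backing object with adjusted offsets, which
 * is why NORMAL and VPAGETABLE entries bump the object reference count.
 */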
1163
1164 /*
1165  *      VM_MAP_RANGE_CHECK:     [ internal use only ]
1166  *
1167  *      Asserts that the starting and ending region
1168  *      addresses fall within the valid range of the map.
1169  */
1170 #define VM_MAP_RANGE_CHECK(map, start, end)             \
1171                 {                                       \
1172                 if (start < vm_map_min(map))            \
1173                         start = vm_map_min(map);        \
1174                 if (end > vm_map_max(map))              \
1175                         end = vm_map_max(map);          \
1176                 if (start > end)                        \
1177                         start = end;                    \
1178                 }
1179
1180 /*
1181  *      vm_map_transition_wait: [ kernel use only ]
1182  *
1183  *      Used to block when an in-transition collision occurs.  The map
1184  *      is unlocked for the sleep and relocked before the return.
1185  */
1186 static
1187 void
1188 vm_map_transition_wait(vm_map_t map)
1189 {
1190         vm_map_unlock(map);
1191         tsleep(map, 0, "vment", 0);
1192         vm_map_lock(map);
1193 }
1194
1195 /*
1196  * CLIP_CHECK_BACK
1197  * CLIP_CHECK_FWD
1198  *
1199  *      When we do blocking operations with the map lock held it is
1200  *      possible that a clip might have occurred on our in-transit entry,
1201  *      requiring an adjustment to the entry in our loop.  These macros
1202  *      help the pageable and clip_range code deal with the case.  The
1203  *      conditional costs virtually nothing if no clipping has occurred.
1204  */
1205
1206 #define CLIP_CHECK_BACK(entry, save_start)              \
1207     do {                                                \
1208             while (entry->start != save_start) {        \
1209                     entry = entry->prev;                \
1210                     KASSERT(entry != &map->header, ("bad entry clip")); \
1211             }                                           \
1212     } while(0)
1213
1214 #define CLIP_CHECK_FWD(entry, save_end)                 \
1215     do {                                                \
1216             while (entry->end != save_end) {            \
1217                     entry = entry->next;                \
1218                     KASSERT(entry != &map->header, ("bad entry clip")); \
1219             }                                           \
1220     } while(0)
1221
1222
1223 /*
1224  *      vm_map_clip_range:      [ kernel use only ]
1225  *
1226  *      Clip the specified range and return the base entry.  The
1227  *      range may cover several entries starting at the returned base
1228  *      and the first and last entry in the covering sequence will be
1229  *      properly clipped to the requested start and end address.
1230  *
1231  *      If no holes are allowed you should pass the MAP_CLIP_NO_HOLES
1232  *      flag.  
1233  *
1234  *      The MAP_ENTRY_IN_TRANSITION flag will be set for the entries
1235  *      covered by the requested range.
1236  *
1237  *      The map must be exclusively locked on entry and will remain locked
1238  *      on return. If no range exists or the range contains holes and you
1239  *      specified that no holes were allowed, NULL will be returned.  This
1240  *      routine may temporarily unlock the map in order to avoid a deadlock when
1241  *      sleeping.
1242  */
1243 static
1244 vm_map_entry_t
1245 vm_map_clip_range(vm_map_t map, vm_offset_t start, vm_offset_t end, 
1246         int *countp, int flags)
1247 {
1248         vm_map_entry_t start_entry;
1249         vm_map_entry_t entry;
1250
1251         /*
1252          * Locate the entry and effect initial clipping.  The in-transition
1253          * case does not occur very often so do not try to optimize it.
1254          */
1255 again:
1256         if (vm_map_lookup_entry(map, start, &start_entry) == FALSE)
1257                 return (NULL);
1258         entry = start_entry;
1259         if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
1260                 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
1261                 ++mycpu->gd_cnt.v_intrans_coll;
1262                 ++mycpu->gd_cnt.v_intrans_wait;
1263                 vm_map_transition_wait(map);
1264                 /*
1265                  * entry and/or start_entry may have been clipped while
1266                  * we slept, or may have gone away entirely.  We have
1267                  * to restart from the lookup.
1268                  */
1269                 goto again;
1270         }
1271         /*
1272          * Since we hold an exclusive map lock we do not have to restart
1273          * after clipping, even though clipping may block in zalloc.
1274          */
1275         vm_map_clip_start(map, entry, start, countp);
1276         vm_map_clip_end(map, entry, end, countp);
1277         entry->eflags |= MAP_ENTRY_IN_TRANSITION;
1278
1279         /*
1280          * Scan entries covered by the range.  When working on the next
1281          * entry a restart need only re-loop on the current entry which
1282          * we have already locked, since 'next' may have changed.  Also,
1283          * even though entry is safe, it may have been clipped so we
1284          * have to iterate forwards through the clip after sleeping.
1285          */
1286         while (entry->next != &map->header && entry->next->start < end) {
1287                 vm_map_entry_t next = entry->next;
1288
1289                 if (flags & MAP_CLIP_NO_HOLES) {
1290                         if (next->start > entry->end) {
1291                                 vm_map_unclip_range(map, start_entry,
1292                                         start, entry->end, countp, flags);
1293                                 return(NULL);
1294                         }
1295                 }
1296
1297                 if (next->eflags & MAP_ENTRY_IN_TRANSITION) {
1298                         vm_offset_t save_end = entry->end;
1299                         next->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
1300                         ++mycpu->gd_cnt.v_intrans_coll;
1301                         ++mycpu->gd_cnt.v_intrans_wait;
1302                         vm_map_transition_wait(map);
1303
1304                         /*
1305                          * Clips might have occurred while we blocked.
1306                          */
1307                         CLIP_CHECK_FWD(entry, save_end);
1308                         CLIP_CHECK_BACK(start_entry, start);
1309                         continue;
1310                 }
1311                 /*
1312                  * No restart necessary even though clip_end may block, we
1313                  * are holding the map lock.
1314                  */
1315                 vm_map_clip_end(map, next, end, countp);
1316                 next->eflags |= MAP_ENTRY_IN_TRANSITION;
1317                 entry = next;
1318         }
1319         if (flags & MAP_CLIP_NO_HOLES) {
1320                 if (entry->end != end) {
1321                         vm_map_unclip_range(map, start_entry,
1322                                 start, entry->end, countp, flags);
1323                         return(NULL);
1324                 }
1325         }
1326         return(start_entry);
1327 }
1328
1329 /*
1330  *      vm_map_unclip_range:    [ kernel use only ]
1331  *
1332  *      Undo the effect of vm_map_clip_range().  You should pass the same
1333  *      flags and the same range that you passed to vm_map_clip_range().
1334  *      This code will clear the in-transition flag on the entries and
1335  *      wake up anyone waiting.  This code will also simplify the sequence 
1336  *      and attempt to merge it with entries before and after the sequence.
1337  *
1338  *      The map must be locked on entry and will remain locked on return.
1339  *
1340  *      Note that you should also pass the start_entry returned by 
1341  *      vm_map_clip_range().  However, if you block between the two calls
1342  *      with the map unlocked please be aware that the start_entry may
1343  *      have been clipped and you may need to scan it backwards to find
1344  *      the entry corresponding with the original start address.  You are
1345  *      responsible for this, vm_map_unclip_range() expects the correct
1346  *      start_entry to be passed to it and will KASSERT otherwise.
1347  */
1348 static
1349 void
1350 vm_map_unclip_range(
1351         vm_map_t map,
1352         vm_map_entry_t start_entry,
1353         vm_offset_t start,
1354         vm_offset_t end,
1355         int *countp,
1356         int flags)
1357 {
1358         vm_map_entry_t entry;
1359
1360         entry = start_entry;
1361
1362         KASSERT(entry->start == start, ("unclip_range: illegal base entry"));
1363         while (entry != &map->header && entry->start < end) {
1364                 KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION, ("in-transition flag not set during unclip on: %p", entry));
1365                 KASSERT(entry->end <= end, ("unclip_range: tail wasn't clipped"));
1366                 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
1367                 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
1368                         entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
1369                         wakeup(map);
1370                 }
1371                 entry = entry->next;
1372         }
1373
1374         /*
1375          * Simplification does not block so there is no restart case.
1376          */
1377         entry = start_entry;
1378         while (entry != &map->header && entry->start < end) {
1379                 vm_map_simplify_entry(map, entry, countp);
1380                 entry = entry->next;
1381         }
1382 }
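/*
 * Illustrative sketch (not part of the original source): how a
 * hypothetical kernel caller pairs the two routines above, passing the
 * same range and flags to both as required:
 *
 *	entry = vm_map_clip_range(map, start, end, &count,
 *				  MAP_CLIP_NO_HOLES);
 *	if (entry != NULL) {
 *		... operate on the clipped, in-transition entries ...
 *		vm_map_unclip_range(map, entry, start, end, &count,
 *				    MAP_CLIP_NO_HOLES);
 *	}
 */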
1383
1384 /*
1385  *      vm_map_submap:          [ kernel use only ]
1386  *
1387  *      Mark the given range as handled by a subordinate map.
1388  *
1389  *      This range must have been created with vm_map_find,
1390  *      and no other operations may have been performed on this
1391  *      range prior to calling vm_map_submap.
1392  *
1393  *      Only a limited number of operations can be performed
1394  *      within this range after calling vm_map_submap:
1395  *              vm_fault
1396  *      [Don't try vm_map_copy!]
1397  *
1398  *      To remove a submapping, one must first remove the
1399  *      range from the superior map, and then destroy the
1400  *      submap (if desired).  [Better yet, don't try it.]
1401  */
1402 int
1403 vm_map_submap(vm_map_t map, vm_offset_t start, vm_offset_t end, vm_map_t submap)
1404 {
1405         vm_map_entry_t entry;
1406         int result = KERN_INVALID_ARGUMENT;
1407         int count;
1408
1409         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1410         vm_map_lock(map);
1411
1412         VM_MAP_RANGE_CHECK(map, start, end);
1413
1414         if (vm_map_lookup_entry(map, start, &entry)) {
1415                 vm_map_clip_start(map, entry, start, &count);
1416         } else {
1417                 entry = entry->next;
1418         }
1419
1420         vm_map_clip_end(map, entry, end, &count);
1421
1422         if ((entry->start == start) && (entry->end == end) &&
1423             ((entry->eflags & MAP_ENTRY_COW) == 0) &&
1424             (entry->object.vm_object == NULL)) {
1425                 entry->object.sub_map = submap;
1426                 entry->maptype = VM_MAPTYPE_SUBMAP;
1427                 result = KERN_SUCCESS;
1428         }
1429         vm_map_unlock(map);
1430         vm_map_entry_release(count);
1431
1432         return (result);
1433 }
1434
1435 /*
1436  * vm_map_protect:
1437  *
1438  * Sets the protection of the specified address region in the target map. 
1439  * If "set_max" is specified, the maximum protection is to be set;
1440  * otherwise, only the current protection is affected.
1441  *
1442  * The protection is not applicable to submaps, but is applicable to normal
1443  * maps and maps governed by virtual page tables.  For example, when operating
1444  * on a virtual page table our protection basically controls how COW occurs
1445  * on the backing object, whereas the virtual page table abstraction itself
1446  * is an abstraction for userland.
1447  */
1448 int
1449 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
1450                vm_prot_t new_prot, boolean_t set_max)
1451 {
1452         vm_map_entry_t current;
1453         vm_map_entry_t entry;
1454         int count;
1455
1456         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1457         vm_map_lock(map);
1458
1459         VM_MAP_RANGE_CHECK(map, start, end);
1460
1461         if (vm_map_lookup_entry(map, start, &entry)) {
1462                 vm_map_clip_start(map, entry, start, &count);
1463         } else {
1464                 entry = entry->next;
1465         }
1466
1467         /*
1468          * Make a first pass to check for protection violations.
1469          */
1470         current = entry;
1471         while ((current != &map->header) && (current->start < end)) {
1472                 if (current->maptype == VM_MAPTYPE_SUBMAP) {
1473                         vm_map_unlock(map);
1474                         vm_map_entry_release(count);
1475                         return (KERN_INVALID_ARGUMENT);
1476                 }
1477                 if ((new_prot & current->max_protection) != new_prot) {
1478                         vm_map_unlock(map);
1479                         vm_map_entry_release(count);
1480                         return (KERN_PROTECTION_FAILURE);
1481                 }
1482                 current = current->next;
1483         }
1484
1485         /*
1486          * Go back and fix up protections. [Note that clipping is not
1487          * necessary the second time.]
1488          */
1489         current = entry;
1490
1491         while ((current != &map->header) && (current->start < end)) {
1492                 vm_prot_t old_prot;
1493
1494                 vm_map_clip_end(map, current, end, &count);
1495
1496                 old_prot = current->protection;
1497                 if (set_max) {
1498                         current->protection =
1499                             (current->max_protection = new_prot) &
1500                             old_prot;
1501                 } else {
1502                         current->protection = new_prot;
1503                 }
1504
1505                 /*
1506                  * Update physical map if necessary. Worry about copy-on-write
1507                  * here -- CHECK THIS XXX
1508                  */
1509
1510                 if (current->protection != old_prot) {
1511 #define MASK(entry)     (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
1512                                                         VM_PROT_ALL)
1513
1514                         pmap_protect(map->pmap, current->start,
1515                             current->end,
1516                             current->protection & MASK(current));
1517 #undef  MASK
1518                 }
1519
1520                 vm_map_simplify_entry(map, current, &count);
1521
1522                 current = current->next;
1523         }
1524
1525         vm_map_unlock(map);
1526         vm_map_entry_release(count);
1527         return (KERN_SUCCESS);
1528 }
1529
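/*
 * Illustrative sketch, not part of the original file: how an mprotect-style
 * caller might use vm_map_protect().  Passing FALSE for set_max changes only
 * the current protection; TRUE would also lower the maximum protection.  The
 * wrapper name and errno mapping are assumptions for illustration.
 */
static int
example_mprotect(struct vmspace *vm, vm_offset_t addr, vm_size_t len,
                 vm_prot_t prot)
{
        vm_offset_t start = trunc_page(addr);
        vm_offset_t end = round_page(addr + len);

        switch (vm_map_protect(&vm->vm_map, start, end, prot, FALSE)) {
        case KERN_SUCCESS:
                return (0);
        case KERN_PROTECTION_FAILURE:
                return (EACCES);        /* new_prot exceeds max_protection */
        default:
                return (EINVAL);
        }
}
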
1530 /*
1531  *      vm_map_madvise:
1532  *
1533  *      This routine traverses a process's map handling the madvise
1534  *      system call.  Advisories are classified as either those affecting
1535  *      the vm_map_entry structure, or those affecting the underlying
1536  *      objects.
1537  *
1538  *      The <value> argument is used for extended madvise calls.
1539  */
1540 int
1541 vm_map_madvise(vm_map_t map, vm_offset_t start, vm_offset_t end,
1542                int behav, off_t value)
1543 {
1544         vm_map_entry_t current, entry;
1545         int modify_map = 0;
1546         int error = 0;
1547         int count;
1548
1549         /*
1550          * Some madvise calls directly modify the vm_map_entry, in which case
1551          * we need to use an exclusive lock on the map and we need to perform 
1552          * various clipping operations.  Otherwise we only need a read-lock
1553          * on the map.
1554          */
1555
1556         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1557
1558         switch(behav) {
1559         case MADV_NORMAL:
1560         case MADV_SEQUENTIAL:
1561         case MADV_RANDOM:
1562         case MADV_NOSYNC:
1563         case MADV_AUTOSYNC:
1564         case MADV_NOCORE:
1565         case MADV_CORE:
1566         case MADV_SETMAP:
1567         case MADV_INVAL:
1568                 modify_map = 1;
1569                 vm_map_lock(map);
1570                 break;
1571         case MADV_WILLNEED:
1572         case MADV_DONTNEED:
1573         case MADV_FREE:
1574                 vm_map_lock_read(map);
1575                 break;
1576         default:
1577                 vm_map_entry_release(count);
1578                 return (EINVAL);
1579         }
1580
1581         /*
1582          * Locate starting entry and clip if necessary.
1583          */
1584
1585         VM_MAP_RANGE_CHECK(map, start, end);
1586
1587         if (vm_map_lookup_entry(map, start, &entry)) {
1588                 if (modify_map)
1589                         vm_map_clip_start(map, entry, start, &count);
1590         } else {
1591                 entry = entry->next;
1592         }
1593
1594         if (modify_map) {
1595                 /*
1596                  * madvise behaviors that are implemented in the vm_map_entry.
1597                  *
1598                  * We clip the vm_map_entry so that behavioral changes are
1599                  * limited to the specified address range.
1600                  */
1601                 for (current = entry;
1602                      (current != &map->header) && (current->start < end);
1603                      current = current->next
1604                 ) {
1605                         if (current->maptype == VM_MAPTYPE_SUBMAP)
1606                                 continue;
1607
1608                         vm_map_clip_end(map, current, end, &count);
1609
1610                         switch (behav) {
1611                         case MADV_NORMAL:
1612                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
1613                                 break;
1614                         case MADV_SEQUENTIAL:
1615                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
1616                                 break;
1617                         case MADV_RANDOM:
1618                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
1619                                 break;
1620                         case MADV_NOSYNC:
1621                                 current->eflags |= MAP_ENTRY_NOSYNC;
1622                                 break;
1623                         case MADV_AUTOSYNC:
1624                                 current->eflags &= ~MAP_ENTRY_NOSYNC;
1625                                 break;
1626                         case MADV_NOCORE:
1627                                 current->eflags |= MAP_ENTRY_NOCOREDUMP;
1628                                 break;
1629                         case MADV_CORE:
1630                                 current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
1631                                 break;
1632                         case MADV_INVAL:
1633                                 /*
1634                                  * Invalidate the related pmap entries, used
1635                                  * to flush portions of the real kernel's
1636                                  * pmap when the caller has removed or
1637                                  * modified existing mappings in a virtual
1638                                  * page table.
1639                                  */
1640                                 pmap_remove(map->pmap,
1641                                             current->start, current->end);
1642                                 break;
1643                         case MADV_SETMAP:
1644                                 /*
1645                                  * Set the page directory page for a map
1646                                  * governed by a virtual page table.  The
1647                                  * entry must already be governed by a
1648                                  * virtual page table or EINVAL is returned.
1649                                  *
1650                                  * XXX the page directory page is stored
1651                                  * in the aux.master_pde field of the map_entry.
1652                                  *
1653                                  * XXX the map simplification code does not
1654                                  * compare this field so weird things may
1655                                  * happen if you do not apply this function
1656                                  * to the entire mapping governed by the
1657                                  * virtual page table.
1658                                  */
1659                                 if (current->maptype != VM_MAPTYPE_VPAGETABLE) {
1660                                         error = EINVAL;
1661                                         break;
1662                                 }
1663                                 current->aux.master_pde = value;
1664                                 pmap_remove(map->pmap,
1665                                             current->start, current->end);
1666                                 break;
1667                         default:
1668                                 error = EINVAL;
1669                                 break;
1670                         }
1671                         vm_map_simplify_entry(map, current, &count);
1672                 }
1673                 vm_map_unlock(map);
1674         } else {
1675                 vm_pindex_t pindex;
1676                 int count;
1677
1678                 /*
1679                  * madvise behaviors that are implemented in the underlying
1680                  * vm_object.
1681                  *
1682                  * Since we don't clip the vm_map_entry, we have to clip
1683                  * the vm_object pindex and count.
1684                  *
1685                  * NOTE!  We currently do not support these functions on
1686                  * virtual page tables.
1687                  */
1688                 for (current = entry;
1689                      (current != &map->header) && (current->start < end);
1690                      current = current->next
1691                 ) {
1692                         vm_offset_t useStart;
1693
1694                         if (current->maptype != VM_MAPTYPE_NORMAL)
1695                                 continue;
1696
1697                         pindex = OFF_TO_IDX(current->offset);
1698                         count = atop(current->end - current->start);
1699                         useStart = current->start;
1700
1701                         if (current->start < start) {
1702                                 pindex += atop(start - current->start);
1703                                 count -= atop(start - current->start);
1704                                 useStart = start;
1705                         }
1706                         if (current->end > end)
1707                                 count -= atop(current->end - end);
1708
1709                         if (count <= 0)
1710                                 continue;
1711
1712                         vm_object_madvise(current->object.vm_object,
1713                                           pindex, count, behav);
1714
1715                         /*
1716                          * Try to populate the page table.  Mappings governed
1717                          * by virtual page tables cannot be pre-populated
1718                          * without a lot of work so don't try.
1719                          */
1720                         if (behav == MADV_WILLNEED &&
1721                             current->maptype != VM_MAPTYPE_VPAGETABLE) {
1722                                 pmap_object_init_pt(
1723                                     map->pmap, 
1724                                     useStart,
1725                                     current->protection,
1726                                     current->object.vm_object,
1727                                     pindex, 
1728                                     (count << PAGE_SHIFT),
1729                                     MAP_PREFAULT_MADVISE
1730                                 );
1731                         }
1732                 }
1733                 vm_map_unlock_read(map);
1734         }
1735         vm_map_entry_release(count);
1736         return(error);
1737 }       
1738
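/*
 * Illustrative sketch, not part of the original file: associating a page
 * directory page with a MAP_VPAGETABLE mapping by issuing MADV_SETMAP
 * through vm_map_madvise().  As the comments above note, the advisory
 * should cover the entire mapping governed by the virtual page table.
 * The helper name is an assumption for illustration; vm_map_madvise()
 * already returns an errno-style value.
 */
static int
example_set_vpagetable_pdir(struct vmspace *vm, vm_offset_t addr,
                            vm_size_t len, off_t pdir_offset)
{
        return (vm_map_madvise(&vm->vm_map, trunc_page(addr),
                               round_page(addr + len),
                               MADV_SETMAP, pdir_offset));
}
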
1739
1740 /*
1741  *      vm_map_inherit:
1742  *
1743  *      Sets the inheritance of the specified address
1744  *      range in the target map.  Inheritance
1745  *      affects how the map will be shared with
1746  *      child maps at the time of vm_map_fork.
1747  */
1748 int
1749 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
1750                vm_inherit_t new_inheritance)
1751 {
1752         vm_map_entry_t entry;
1753         vm_map_entry_t temp_entry;
1754         int count;
1755
1756         switch (new_inheritance) {
1757         case VM_INHERIT_NONE:
1758         case VM_INHERIT_COPY:
1759         case VM_INHERIT_SHARE:
1760                 break;
1761         default:
1762                 return (KERN_INVALID_ARGUMENT);
1763         }
1764
1765         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1766         vm_map_lock(map);
1767
1768         VM_MAP_RANGE_CHECK(map, start, end);
1769
1770         if (vm_map_lookup_entry(map, start, &temp_entry)) {
1771                 entry = temp_entry;
1772                 vm_map_clip_start(map, entry, start, &count);
1773         } else
1774                 entry = temp_entry->next;
1775
1776         while ((entry != &map->header) && (entry->start < end)) {
1777                 vm_map_clip_end(map, entry, end, &count);
1778
1779                 entry->inheritance = new_inheritance;
1780
1781                 vm_map_simplify_entry(map, entry, &count);
1782
1783                 entry = entry->next;
1784         }
1785         vm_map_unlock(map);
1786         vm_map_entry_release(count);
1787         return (KERN_SUCCESS);
1788 }
1789
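/*
 * Illustrative sketch, not part of the original file: a minherit-style
 * caller selecting how a region is propagated to the child at fork time.
 * The wrapper name and errno mapping are assumptions for illustration.
 */
static int
example_minherit(struct vmspace *vm, vm_offset_t addr, vm_size_t len,
                 vm_inherit_t inherit)
{
        /* inherit must be VM_INHERIT_NONE, VM_INHERIT_COPY or VM_INHERIT_SHARE */
        if (vm_map_inherit(&vm->vm_map, trunc_page(addr),
                           round_page(addr + len), inherit) != KERN_SUCCESS)
                return (EINVAL);
        return (0);
}
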
1790 /*
1791  * Implement the semantics of mlock (new_pageable == FALSE) and munlock.
1792  */
1793 int
1794 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t real_end,
1795     boolean_t new_pageable)
1796 {
1797         vm_map_entry_t entry;
1798         vm_map_entry_t start_entry;
1799         vm_offset_t end;
1800         int rv = KERN_SUCCESS;
1801         int count;
1802
1803         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1804         vm_map_lock(map);
1805         VM_MAP_RANGE_CHECK(map, start, real_end);
1806         end = real_end;
1807
1808         start_entry = vm_map_clip_range(map, start, end, &count, MAP_CLIP_NO_HOLES);
1809         if (start_entry == NULL) {
1810                 vm_map_unlock(map);
1811                 vm_map_entry_release(count);
1812                 return (KERN_INVALID_ADDRESS);
1813         }
1814
1815         if (new_pageable == 0) {
1816                 entry = start_entry;
1817                 while ((entry != &map->header) && (entry->start < end)) {
1818                         vm_offset_t save_start;
1819                         vm_offset_t save_end;
1820
1821                         /*
1822                          * Already user wired or hard wired (trivial cases)
1823                          */
1824                         if (entry->eflags & MAP_ENTRY_USER_WIRED) {
1825                                 entry = entry->next;
1826                                 continue;
1827                         }
1828                         if (entry->wired_count != 0) {
1829                                 entry->wired_count++;
1830                                 entry->eflags |= MAP_ENTRY_USER_WIRED;
1831                                 entry = entry->next;
1832                                 continue;
1833                         }
1834
1835                         /*
1836                          * A new wiring requires instantiation of appropriate
1837                          * management structures and the faulting in of the
1838                          * page.
1839                          */
1840                         if (entry->maptype != VM_MAPTYPE_SUBMAP) {
1841                                 int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
1842                                 if (copyflag && ((entry->protection & VM_PROT_WRITE) != 0)) {
1843
1844                                         vm_object_shadow(&entry->object.vm_object,
1845                                             &entry->offset,
1846                                             atop(entry->end - entry->start));
1847                                         entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
1848
1849                                 } else if (entry->object.vm_object == NULL &&
1850                                            !map->system_map) {
1851
1852                                         entry->object.vm_object =
1853                                             vm_object_allocate(OBJT_DEFAULT,
1854                                                 atop(entry->end - entry->start));
1855                                         entry->offset = (vm_offset_t) 0;
1856
1857                                 }
1858                         }
1859                         entry->wired_count++;
1860                         entry->eflags |= MAP_ENTRY_USER_WIRED;
1861
1862                         /*
1863                          * Now fault in the area.  Note that vm_fault_wire()
1864                          * may release the map lock temporarily, it will be
1865                          * relocked on return.  The in-transition
1866                          * flag protects the entries. 
1867                          */
1868                         save_start = entry->start;
1869                         save_end = entry->end;
1870                         rv = vm_fault_wire(map, entry, TRUE);
1871                         if (rv) {
1872                                 CLIP_CHECK_BACK(entry, save_start);
1873                                 for (;;) {
1874                                         KASSERT(entry->wired_count == 1, ("bad wired_count on entry"));
1875                                         entry->eflags &= ~MAP_ENTRY_USER_WIRED;
1876                                         entry->wired_count = 0;
1877                                         if (entry->end == save_end)
1878                                                 break;
1879                                         entry = entry->next;
1880                                         KASSERT(entry != &map->header, ("bad entry clip during backout"));
1881                                 }
1882                                 end = save_start;       /* unwire the rest */
1883                                 break;
1884                         }
1885                         /*
1886                          * note that even though the entry might have been
1887                          * clipped, the USER_WIRED flag we set prevents
1888                          * duplication so we do not have to do a 
1889                          * clip check.
1890                          */
1891                         entry = entry->next;
1892                 }
1893
1894                 /*
1895                  * If we failed fall through to the unwiring section to
1896                  * unwire what we had wired so far.  'end' has already
1897                  * been adjusted.
1898                  */
1899                 if (rv)
1900                         new_pageable = 1;
1901
1902                 /*
1903                  * start_entry might have been clipped if we unlocked the
1904                  * map and blocked.  No matter how clipped it has gotten
1905                  * there should be a fragment that is on our start boundary.
1906                  */
1907                 CLIP_CHECK_BACK(start_entry, start);
1908         }
1909
1910         /*
1911          * Deal with the unwiring case.
1912          */
1913         if (new_pageable) {
1914                 /*
1915                  * This is the unwiring case.  We must first ensure that the
1916                  * range to be unwired is really wired down.  We know there
1917                  * are no holes.
1918                  */
1919                 entry = start_entry;
1920                 while ((entry != &map->header) && (entry->start < end)) {
1921                         if ((entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
1922                                 rv = KERN_INVALID_ARGUMENT;
1923                                 goto done;
1924                         }
1925                         KASSERT(entry->wired_count != 0, ("wired count was 0 with USER_WIRED set! %p", entry));
1926                         entry = entry->next;
1927                 }
1928
1929                 /*
1930                  * Now decrement the wiring count for each region. If a region
1931                  * becomes completely unwired, unwire its physical pages and
1932                  * mappings.
1933                  */
1934                 /*
1935                  * The wiring check above already walked the entries with
1936                  * the "entry" loop variable and left it pointing past the
1937                  * end of the range.  The loop variable therefore must be
1938                  * reset to start_entry before this second pass; if it
1939                  * were simply carried over from the first loop, this
1940                  * unwiring loop would never be entered, the pages backing
1941                  * the entries would never be unwired, and wired pages
1942                  * would be leaked.
1943                  */
1944                 entry = start_entry;
1945                 while ((entry != &map->header) && (entry->start < end)) {
1946                         KASSERT(entry->eflags & MAP_ENTRY_USER_WIRED,
1947                                 ("expected USER_WIRED on entry %p", entry));
1948                         entry->eflags &= ~MAP_ENTRY_USER_WIRED;
1949                         entry->wired_count--;
1950                         if (entry->wired_count == 0)
1951                                 vm_fault_unwire(map, entry);
1952                         entry = entry->next;
1953                 }
1954         }
1955 done:
1956         vm_map_unclip_range(map, start_entry, start, real_end, &count,
1957                 MAP_CLIP_NO_HOLES);
1958         map->timestamp++;
1959         vm_map_unlock(map);
1960         vm_map_entry_release(count);
1961         return (rv);
1962 }
1963
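/*
 * Illustrative sketch, not part of the original file: mlock()/munlock()
 * style callers of vm_map_unwire().  new_pageable == FALSE wires (and
 * faults in) the range, TRUE unwires it.  The wrapper names and errno
 * mappings are assumptions for illustration.
 */
static int
example_mlock(struct vmspace *vm, vm_offset_t addr, vm_size_t len)
{
        if (vm_map_unwire(&vm->vm_map, trunc_page(addr),
                          round_page(addr + len), FALSE) != KERN_SUCCESS)
                return (ENOMEM);
        return (0);
}

static int
example_munlock(struct vmspace *vm, vm_offset_t addr, vm_size_t len)
{
        if (vm_map_unwire(&vm->vm_map, trunc_page(addr),
                          round_page(addr + len), TRUE) != KERN_SUCCESS)
                return (EINVAL);
        return (0);
}
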
1964 /*
1965  *      vm_map_wire:
1966  *
1967  *      Sets the pageability of the specified address
1968  *      range in the target map.  Regions specified
1969  *      as not pageable require locked-down physical
1970  *      memory and physical page maps.
1971  *
1972  *      The map must not be locked, but a reference
1973  *      must remain to the map throughout the call.
1974  *
1975  *      This function may be called via the zalloc path and must properly
1976  *      reserve map entries for kernel_map.
1977  */
1978 int
1979 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t real_end, int kmflags)
1980 {
1981         vm_map_entry_t entry;
1982         vm_map_entry_t start_entry;
1983         vm_offset_t end;
1984         int rv = KERN_SUCCESS;
1985         int count;
1986
1987         if (kmflags & KM_KRESERVE)
1988                 count = vm_map_entry_kreserve(MAP_RESERVE_COUNT);
1989         else
1990                 count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1991         vm_map_lock(map);
1992         VM_MAP_RANGE_CHECK(map, start, real_end);
1993         end = real_end;
1994
1995         start_entry = vm_map_clip_range(map, start, end, &count, MAP_CLIP_NO_HOLES);
1996         if (start_entry == NULL) {
1997                 vm_map_unlock(map);
1998                 rv = KERN_INVALID_ADDRESS;
1999                 goto failure;
2000         }
2001         if ((kmflags & KM_PAGEABLE) == 0) {
2002                 /*
2003                  * Wiring.  
2004                  *
2005                  * 1.  Holding the write lock, we create any shadow or zero-fill
2006                  * objects that need to be created. Then we clip each map
2007                  * entry to the region to be wired and increment its wiring
2008                  * count.  We create objects before clipping the map entries
2009                  * to avoid object proliferation.
2010                  *
2011                  * 2.  We downgrade to a read lock, and call vm_fault_wire to
2012                  * fault in the pages for any newly wired area (wired_count is
2013                  * 1).
2014                  *
2015                  * Downgrading to a read lock for vm_fault_wire avoids a 
2016                  * possible deadlock with another process that may have faulted
2017                  * on one of the pages to be wired (it would mark the page busy,
2018                  * blocking us, then in turn block on the map lock that we
2019                  * hold).  Because of problems in the recursive lock package,
2020                  * we cannot upgrade to a write lock in vm_map_lookup.  Thus,
2021                  * any actions that require the write lock must be done
2022                  * beforehand.  Because we keep the read lock on the map, the
2023                  * copy-on-write status of the entries we modify here cannot
2024                  * change.
2025                  */
2026
2027                 entry = start_entry;
2028                 while ((entry != &map->header) && (entry->start < end)) {
2029                         /*
2030                          * Trivial case if the entry is already wired
2031                          */
2032                         if (entry->wired_count) {
2033                                 entry->wired_count++;
2034                                 entry = entry->next;
2035                                 continue;
2036                         }
2037
2038                         /*
2039                          * The entry is being newly wired, we have to setup
2040                          * appropriate management structures.  A shadow 
2041                          * object is required for a copy-on-write region,
2042                          * or a normal object for a zero-fill region.  We
2043                          * do not have to do this for entries that point to sub
2044                          * maps because we won't hold the lock on the sub map.
2045                          */
2046                         if (entry->maptype != VM_MAPTYPE_SUBMAP) {
2047                                 int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
2048                                 if (copyflag &&
2049                                     ((entry->protection & VM_PROT_WRITE) != 0)) {
2050
2051                                         vm_object_shadow(&entry->object.vm_object,
2052                                             &entry->offset,
2053                                             atop(entry->end - entry->start));
2054                                         entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
2055                                 } else if (entry->object.vm_object == NULL &&
2056                                            !map->system_map) {
2057                                         entry->object.vm_object =
2058                                             vm_object_allocate(OBJT_DEFAULT,
2059                                                 atop(entry->end - entry->start));
2060                                         entry->offset = (vm_offset_t) 0;
2061                                 }
2062                         }
2063
2064                         entry->wired_count++;
2065                         entry = entry->next;
2066                 }
2067
2068                 /*
2069                  * Pass 2.
2070                  */
2071
2072                 /*
2073                  * HACK HACK HACK HACK
2074                  *
2075                  * Unlock the map to avoid deadlocks.  The in-transit flag
2076                  * protects us from most changes but note that
2077                  * clipping may still occur.  To prevent clipping from
2078                  * occurring after the unlock, except for when we are
2079                  * blocking in vm_fault_wire, we must run in a critical
2080                  * section, otherwise our accesses to entry->start and 
2081                  * entry->end could be corrupted.  We have to enter the
2082                  * critical section prior to unlocking so start_entry does
2083                  * not change out from under us at the very beginning of the
2084                  * loop.
2085                  *
2086                  * HACK HACK HACK HACK
2087                  */
2088
2089                 crit_enter();
2090
2091                 entry = start_entry;
2092                 while (entry != &map->header && entry->start < end) {
2093                         /*
2094                          * If vm_fault_wire fails for any page we need to undo
2095                          * what has been done.  We decrement the wiring count
2096                          * for those pages which have not yet been wired (now)
2097                          * and unwire those that have (later).
2098                          */
2099                         vm_offset_t save_start = entry->start;
2100                         vm_offset_t save_end = entry->end;
2101
2102                         if (entry->wired_count == 1)
2103                                 rv = vm_fault_wire(map, entry, FALSE);
2104                         if (rv) {
2105                                 CLIP_CHECK_BACK(entry, save_start);
2106                                 for (;;) {
2107                                         KASSERT(entry->wired_count == 1, ("wired_count changed unexpectedly"));
2108                                         entry->wired_count = 0;
2109                                         if (entry->end == save_end)
2110                                                 break;
2111                                         entry = entry->next;
2112                                         KASSERT(entry != &map->header, ("bad entry clip during backout"));
2113                                 }
2114                                 end = save_start;
2115                                 break;
2116                         }
2117                         CLIP_CHECK_FWD(entry, save_end);
2118                         entry = entry->next;
2119                 }
2120                 crit_exit();
2121
2122                 /*
2123                  * If a failure occurred undo everything by falling through
2124                  * to the unwiring code.  'end' has already been adjusted
2125                  * appropriately.
2126                  */
2127                 if (rv)
2128                         kmflags |= KM_PAGEABLE;
2129
2130                 /*
2131                  * start_entry is still IN_TRANSITION but may have been 
2132                  * clipped since vm_fault_wire() unlocks and relocks the
2133                  * map.  No matter how clipped it has gotten there should
2134                  * be a fragment that is on our start boundary.
2135                  */
2136                 CLIP_CHECK_BACK(start_entry, start);
2137         }
2138
2139         if (kmflags & KM_PAGEABLE) {
2140                 /*
2141                  * This is the unwiring case.  We must first ensure that the
2142                  * range to be unwired is really wired down.  We know there
2143                  * are no holes.
2144                  */
2145                 entry = start_entry;
2146                 while ((entry != &map->header) && (entry->start < end)) {
2147                         if (entry->wired_count == 0) {
2148                                 rv = KERN_INVALID_ARGUMENT;
2149                                 goto done;
2150                         }
2151                         entry = entry->next;
2152                 }
2153
2154                 /*
2155                  * Now decrement the wiring count for each region. If a region
2156                  * becomes completely unwired, unwire its physical pages and
2157                  * mappings.
2158                  */
2159                 entry = start_entry;
2160                 while ((entry != &map->header) && (entry->start < end)) {
2161                         entry->wired_count--;
2162                         if (entry->wired_count == 0)
2163                                 vm_fault_unwire(map, entry);
2164                         entry = entry->next;
2165                 }
2166         }
2167 done:
2168         vm_map_unclip_range(map, start_entry, start, real_end, &count,
2169                 MAP_CLIP_NO_HOLES);
2170         map->timestamp++;
2171         vm_map_unlock(map);
2172 failure:
2173         if (kmflags & KM_KRESERVE)
2174                 vm_map_entry_krelease(count);
2175         else
2176                 vm_map_entry_release(count);
2177         return (rv);
2178 }
2179
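/*
 * Illustrative sketch, not part of the original file: wiring and later
 * unwiring a kernel address range with vm_map_wire().  KM_PAGEABLE selects
 * the unwiring direction; a caller on the reserved-entry (zalloc) path
 * would add KM_KRESERVE instead of 0.  The helper name and panic message
 * are assumptions for illustration.
 */
static void
example_wire_range(vm_map_t map, vm_offset_t addr, vm_size_t size)
{
        /* fault in and wire down every page in the range */
        if (vm_map_wire(map, addr, addr + size, 0) != KERN_SUCCESS)
                panic("example_wire_range: wiring failed");

        /* ... the range may now be accessed without faulting ... */

        /* release the wiring again */
        vm_map_wire(map, addr, addr + size, KM_PAGEABLE);
}
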
2180 /*
2181  * vm_map_set_wired_quick()
2182  *
2183  *      Mark a newly allocated address range as wired but do not fault in
2184  *      the pages.  The caller is expected to load the pages into the object.
2185  *
2186  *      The map must be locked on entry and will remain locked on return.
2187  */
2188 void
2189 vm_map_set_wired_quick(vm_map_t map, vm_offset_t addr, vm_size_t size, int *countp)
2190 {
2191         vm_map_entry_t scan;
2192         vm_map_entry_t entry;
2193
2194         entry = vm_map_clip_range(map, addr, addr + size, countp, MAP_CLIP_NO_HOLES);
2195         for (scan = entry; scan != &map->header && scan->start < addr + size; scan = scan->next) {
2196             KKASSERT(scan->wired_count == 0);
2197             scan->wired_count = 1;
2198         }
2199         vm_map_unclip_range(map, entry, addr, addr + size, countp, MAP_CLIP_NO_HOLES);
2200 }
2201
2202 /*
2203  * vm_map_clean
2204  *
2205  * Push any dirty cached pages in the address range to their pager.
2206  * If syncio is TRUE, dirty pages are written synchronously.
2207  * If invalidate is TRUE, any cached pages are freed as well.
2208  *
2209  * Returns an error if any part of the specified range is not mapped.
2210  */
2211 int
2212 vm_map_clean(vm_map_t map, vm_offset_t start, vm_offset_t end, boolean_t syncio,
2213     boolean_t invalidate)
2214 {
2215         vm_map_entry_t current;
2216         vm_map_entry_t entry;
2217         vm_size_t size;
2218         vm_object_t object;
2219         vm_ooffset_t offset;
2220
2221         vm_map_lock_read(map);
2222         VM_MAP_RANGE_CHECK(map, start, end);
2223         if (!vm_map_lookup_entry(map, start, &entry)) {
2224                 vm_map_unlock_read(map);
2225                 return (KERN_INVALID_ADDRESS);
2226         }
2227         /*
2228          * Make a first pass to check for holes.
2229          */
2230         for (current = entry; current->start < end; current = current->next) {
2231                 if (current->maptype == VM_MAPTYPE_SUBMAP) {
2232                         vm_map_unlock_read(map);
2233                         return (KERN_INVALID_ARGUMENT);
2234                 }
2235                 if (end > current->end &&
2236                     (current->next == &map->header ||
2237                         current->end != current->next->start)) {
2238                         vm_map_unlock_read(map);
2239                         return (KERN_INVALID_ADDRESS);
2240                 }
2241         }
2242
2243         if (invalidate)
2244                 pmap_remove(vm_map_pmap(map), start, end);
2245         /*
2246          * Make a second pass, cleaning/uncaching pages from the indicated
2247          * objects as we go.
2248          */
2249         for (current = entry; current->start < end; current = current->next) {
2250                 offset = current->offset + (start - current->start);
2251                 size = (end <= current->end ? end : current->end) - start;
2252                 if (current->maptype == VM_MAPTYPE_SUBMAP) {
2253                         vm_map_t smap;
2254                         vm_map_entry_t tentry;
2255                         vm_size_t tsize;
2256
2257                         smap = current->object.sub_map;
2258                         vm_map_lock_read(smap);
2259                         vm_map_lookup_entry(smap, offset, &tentry);
2260                         tsize = tentry->end - offset;
2261                         if (tsize < size)
2262                                 size = tsize;
2263                         object = tentry->object.vm_object;
2264                         offset = tentry->offset + (offset - tentry->start);
2265                         vm_map_unlock_read(smap);
2266                 } else {
2267                         object = current->object.vm_object;
2268                 }
2269                 /*
2270                  * Note that there is absolutely no sense in writing out
2271                  * anonymous objects, so we track down the vnode object
2272                  * to write out.
2273                  * We invalidate (remove) all pages from the address space
2274                  * anyway, for semantic correctness.
2275                  *
2276                  * note: certain anonymous maps, such as MAP_NOSYNC maps,
2277                  * may start out with a NULL object.
2278                  */
2279                 while (object && object->backing_object) {
2280                         offset += object->backing_object_offset;
2281                         object = object->backing_object;
2282                         if (object->size < OFF_TO_IDX( offset + size))
2283                                 size = IDX_TO_OFF(object->size) - offset;
2284                 }
2285                 if (object && (object->type == OBJT_VNODE) && 
2286                     (current->protection & VM_PROT_WRITE)) {
2287                         /*
2288                          * Flush pages if writing is allowed, invalidate them
2289                          * if invalidation requested.  Pages undergoing I/O
2290                          * will be ignored by vm_object_page_remove().
2291                          *
2292                          * We cannot lock the vnode and then wait for paging
2293                          * to complete without deadlocking against vm_fault.
2294                          * Instead we simply call vm_object_page_remove() and
2295                          * allow it to block internally on a page-by-page 
2296                          * basis when it encounters pages undergoing async 
2297                          * I/O.
2298                          */
2299                         int flags;
2300
2301                         vm_object_reference(object);
2302                         vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY);
2303                         flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
2304                         flags |= invalidate ? OBJPC_INVAL : 0;
2305
2306                         /*
2307                          * When operating on a virtual page table just
2308                          * flush the whole object.  XXX we probably ought
2309                          * to restrict the flush to the affected range.
2310                          */
2311                         switch(current->maptype) {
2312                         case VM_MAPTYPE_NORMAL:
2313                                 vm_object_page_clean(object,
2314                                     OFF_TO_IDX(offset),
2315                                     OFF_TO_IDX(offset + size + PAGE_MASK),
2316                                     flags);
2317                                 break;
2318                         case VM_MAPTYPE_VPAGETABLE:
2319                                 vm_object_page_clean(object, 0, 0, flags);
2320                                 break;
2321                         }
2322                         vn_unlock(((struct vnode *)object->handle));
2323                         vm_object_deallocate(object);
2324                 }
2325                 if (object && invalidate &&
2326                    ((object->type == OBJT_VNODE) ||
2327                     (object->type == OBJT_DEVICE))) {
2328                         int clean_only = 
2329                                 (object->type == OBJT_DEVICE) ? FALSE : TRUE;
2330                         vm_object_reference(object);
2331                         switch(current->maptype) {
2332                         case VM_MAPTYPE_NORMAL:
2333                                 vm_object_page_remove(object,
2334                                     OFF_TO_IDX(offset),
2335                                     OFF_TO_IDX(offset + size + PAGE_MASK),
2336                                     clean_only);
2337                                 break;
2338                         case VM_MAPTYPE_VPAGETABLE:
2339                                 vm_object_page_remove(object, 0, 0, clean_only);
2340                                 break;
2341                         }
2342                         vm_object_deallocate(object);
2343                 }
2344                 start += size;
2345         }
2346
2347         vm_map_unlock_read(map);
2348         return (KERN_SUCCESS);
2349 }
2350
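/*
 * Illustrative sketch, not part of the original file: an msync-style caller
 * of vm_map_clean().  MS_SYNC requests synchronous writes (syncio) and
 * MS_INVALIDATE frees cached pages (invalidate).  The wrapper name and
 * errno mapping are assumptions for illustration.
 */
static int
example_msync(struct vmspace *vm, vm_offset_t addr, vm_size_t len, int flags)
{
        boolean_t syncio = (flags & MS_SYNC) ? TRUE : FALSE;
        boolean_t invalidate = (flags & MS_INVALIDATE) ? TRUE : FALSE;

        switch (vm_map_clean(&vm->vm_map, trunc_page(addr),
                             round_page(addr + len), syncio, invalidate)) {
        case KERN_SUCCESS:
                return (0);
        case KERN_INVALID_ADDRESS:
                return (ENOMEM);        /* part of the range is unmapped */
        default:
                return (EINVAL);
        }
}
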
2351 /*
2352  *      vm_map_entry_unwire:    [ internal use only ]
2353  *
2354  *      Make the region specified by this entry pageable.
2355  *
2356  *      The map in question should be locked.
2357  *      [This is the reason for this routine's existence.]
2358  */
2359 static void 
2360 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
2361 {
2362         entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2363         entry->wired_count = 0;
2364         vm_fault_unwire(map, entry);
2365 }
2366
2367 /*
2368  *      vm_map_entry_delete:    [ internal use only ]
2369  *
2370  *      Deallocate the given entry from the target map.
2371  */
2372 static void
2373 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry, int *countp)
2374 {
2375         vm_map_entry_unlink(map, entry);
2376         map->size -= entry->end - entry->start;
2377
2378         switch(entry->maptype) {
2379         case VM_MAPTYPE_NORMAL:
2380         case VM_MAPTYPE_VPAGETABLE:
2381                 vm_object_deallocate(entry->object.vm_object);
2382                 break;
2383         default:
2384                 break;
2385         }
2386
2387         vm_map_entry_dispose(map, entry, countp);
2388 }
2389
2390 /*
2391  *      vm_map_delete:  [ internal use only ]
2392  *
2393  *      Deallocates the given address range from the target
2394  *      map.
2395  */
2396 int
2397 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end, int *countp)
2398 {
2399         vm_object_t object;
2400         vm_map_entry_t entry;
2401         vm_map_entry_t first_entry;
2402
2403 again:
2404         /*
2405          * Find the start of the region, and clip it.  Set entry to point
2406          * at the first record containing the requested address or, if no
2407          * such record exists, the next record with a greater address.  The
2408          * loop will run from this point until a record beyond the termination
2409          * address is encountered.
2410          *
2411          * map->hint must be adjusted to not point to anything we delete,
2412          * so set it to the entry prior to the one being deleted.
2413          *
2414          * GGG see other GGG comment.
2415          */
2416         if (vm_map_lookup_entry(map, start, &first_entry)) {
2417                 entry = first_entry;
2418                 vm_map_clip_start(map, entry, start, countp);
2419                 map->hint = entry->prev;        /* possible problem XXX */
2420         } else {
2421                 map->hint = first_entry;        /* possible problem XXX */
2422                 entry = first_entry->next;
2423         }
2424
2425         /*
2426          * If a hole opens up prior to the current first_free then
2427          * adjust first_free.  As with map->hint, map->first_free
2428          * cannot be left set to anything we might delete.
2429          */
2430         if (entry == &map->header) {
2431                 map->first_free = &map->header;
2432         } else if (map->first_free->start >= start) {
2433                 map->first_free = entry->prev;
2434         }
2435
2436         /*
2437          * Step through all entries in this region
2438          */
2439
2440         while ((entry != &map->header) && (entry->start < end)) {
2441                 vm_map_entry_t next;
2442                 vm_offset_t s, e;
2443                 vm_pindex_t offidxstart, offidxend, count;
2444
2445                 /*
2446                  * If we hit an in-transition entry we have to sleep and
2447                  * retry.  It's easier (and not really slower) to just retry
2448                  * since this case occurs so rarely and the hint is already
2449                  * pointing at the right place.  We have to reset the
2450                  * start offset so as not to accidentally delete an entry
2451                  * another process just created in vacated space.
2452                  */
2453                 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
2454                         entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2455                         start = entry->start;
2456                         ++mycpu->gd_cnt.v_intrans_coll;
2457                         ++mycpu->gd_cnt.v_intrans_wait;
2458                         vm_map_transition_wait(map);
2459                         goto again;
2460                 }
2461                 vm_map_clip_end(map, entry, end, countp);
2462
2463                 s = entry->start;
2464                 e = entry->end;
2465                 next = entry->next;
2466
2467                 offidxstart = OFF_TO_IDX(entry->offset);
2468                 count = OFF_TO_IDX(e - s);
2469                 object = entry->object.vm_object;
2470
2471                 /*
2472                  * Unwire before removing addresses from the pmap; otherwise,
2473                  * unwiring will put the entries back in the pmap.
2474                  */
2475                 if (entry->wired_count != 0)
2476                         vm_map_entry_unwire(map, entry);
2477
2478                 offidxend = offidxstart + count;
2479
2480                 if ((object == kernel_object) || (object == kmem_object)) {
2481                         vm_object_page_remove(object, offidxstart, offidxend, FALSE);
2482                 } else {
2483                         pmap_remove(map->pmap, s, e);
2484                         if (object != NULL &&
2485                             object->ref_count != 1 &&
2486                             (object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING &&
2487                             (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
2488                                 vm_object_collapse(object);
2489                                 vm_object_page_remove(object, offidxstart, offidxend, FALSE);
2490                                 if (object->type == OBJT_SWAP) {
2491                                         swap_pager_freespace(object, offidxstart, count);
2492                                 }
2493                                 if (offidxend >= object->size &&
2494                                     offidxstart < object->size) {
2495                                         object->size = offidxstart;
2496                                 }
2497                         }
2498                 }
2499
2500                 /*
2501                  * Delete the entry (which may delete the object) only after
2502                  * removing all pmap entries pointing to its pages.
2503                  * (Otherwise, its page frames may be reallocated, and any
2504                  * modify bits will be set in the wrong object!)
2505                  */
2506                 vm_map_entry_delete(map, entry, countp);
2507                 entry = next;
2508         }
2509         return (KERN_SUCCESS);
2510 }
2511
2512 /*
2513  *      vm_map_remove:
2514  *
2515  *      Remove the given address range from the target map.
2516  *      This is the exported form of vm_map_delete.
2517  */
2518 int
2519 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
2520 {
2521         int result;
2522         int count;
2523
2524         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
2525         vm_map_lock(map);
2526         VM_MAP_RANGE_CHECK(map, start, end);
2527         result = vm_map_delete(map, start, end, &count);
2528         vm_map_unlock(map);
2529         vm_map_entry_release(count);
2530
2531         return (result);
2532 }
2533
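/*
 * Illustrative sketch, not part of the original file: a munmap-style caller
 * of vm_map_remove(), which handles locking and entry reservation itself.
 * The wrapper name is an assumption for illustration.
 */
static int
example_munmap(struct vmspace *vm, vm_offset_t addr, vm_size_t len)
{
        vm_map_remove(&vm->vm_map, trunc_page(addr), round_page(addr + len));
        return (0);
}
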
2534 /*
2535  *      vm_map_check_protection:
2536  *
2537  *      Assert that the target map allows the specified
2538  *      privilege on the entire address region given.
2539  *      The entire region must be allocated.
2540  */
2541 boolean_t
2542 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
2543                         vm_prot_t protection)
2544 {
2545         vm_map_entry_t entry;
2546         vm_map_entry_t tmp_entry;
2547
2548         if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
2549                 return (FALSE);
2550         }
2551         entry = tmp_entry;
2552
2553         while (start < end) {
2554                 if (entry == &map->header) {
2555                         return (FALSE);
2556                 }
2557                 /*
2558                  * No holes allowed!
2559                  */
2560
2561                 if (start < entry->start) {
2562                         return (FALSE);
2563                 }
2564                 /*
2565                  * Check protection associated with entry.
2566                  */
2567
2568                 if ((entry->protection & protection) != protection) {
2569                         return (FALSE);
2570                 }
2571                 /* go to next entry */
2572
2573                 start = entry->end;
2574                 entry = entry->next;
2575         }
2576         return (TRUE);
2577 }
2578
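/*
 * Illustrative sketch, not part of the original file: asserting that a user
 * range is mapped with at least read permission before operating on it.
 * vm_map_check_protection() only inspects the map entries; it neither wires
 * nor faults in pages.  The helper name is an assumption for illustration.
 */
static int
example_check_user_readable(struct vmspace *vm, vm_offset_t addr,
                            vm_size_t len)
{
        if (!vm_map_check_protection(&vm->vm_map, trunc_page(addr),
                                     round_page(addr + len), VM_PROT_READ))
                return (EFAULT);
        return (0);
}
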
2579 /*
2580  * Split the pages in a map entry into a new object.  This affords
2581  * easier removal of unused pages, and keeps object inheritance from
2582  * having a negative impact on memory usage.
2583  */
2584 static void
2585 vm_map_split(vm_map_entry_t entry)
2586 {
2587         vm_page_t m;
2588         vm_object_t orig_object, new_object, source;
2589         vm_offset_t s, e;
2590         vm_pindex_t offidxstart, offidxend, idx;
2591         vm_size_t size;
2592         vm_ooffset_t offset;
2593
2594         orig_object = entry->object.vm_object;
2595         if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP)
2596                 return;
2597         if (orig_object->ref_count <= 1)
2598                 return;
2599
2600         offset = entry->offset;
2601         s = entry->start;
2602         e = entry->end;
2603
2604         offidxstart = OFF_TO_IDX(offset);
2605         offidxend = offidxstart + OFF_TO_IDX(e - s);
2606         size = offidxend - offidxstart;
2607
2608         new_object = vm_pager_allocate(orig_object->type, NULL,
2609                                        IDX_TO_OFF(size), VM_PROT_ALL, 0);
2610         if (new_object == NULL)
2611                 return;
2612
2613         source = orig_object->backing_object;
2614         if (source != NULL) {
2615                 vm_object_reference(source);    /* Referenced by new_object */
2616                 LIST_INSERT_HEAD(&source->shadow_head,
2617                                   new_object, shadow_list);
2618                 vm_object_clear_flag(source, OBJ_ONEMAPPING);
2619                 new_object->backing_object_offset = 
2620                         orig_object->backing_object_offset + IDX_TO_OFF(offidxstart);
2621                 new_object->backing_object = source;
2622                 source->shadow_count++;
2623                 source->generation++;
2624         }
2625
2626         for (idx = 0; idx < size; idx++) {
2627                 vm_page_t m;
2628
2629                 /*
2630                  * A critical section is required to avoid a race between
2631                  * the lookup and an interrupt/unbusy/free and our busy
2632                  * check.
2633                  */
2634                 crit_enter();
2635         retry:
2636                 m = vm_page_lookup(orig_object, offidxstart + idx);
2637                 if (m == NULL) {
2638                         crit_exit();
2639                         continue;
2640                 }
2641
2642                 /*
2643                  * We must wait for pending I/O to complete before we can
2644                  * rename the page.
2645                  *
2646                  * We do not have to VM_PROT_NONE the page as mappings should
2647                  * not be changed by this operation.
2648                  */
2649                 if (vm_page_sleep_busy(m, TRUE, "spltwt"))
2650                         goto retry;
2651                 vm_page_busy(m);
2652                 vm_page_rename(m, new_object, idx);
2653                 /* page automatically made dirty by rename and cache handled */
2654                 vm_page_busy(m);
2655                 crit_exit();
2656         }
2657
2658         if (orig_object->type == OBJT_SWAP) {
2659                 vm_object_pip_add(orig_object, 1);
2660                 /*
2661                  * copy orig_object pages into new_object
2662                  * and destroy unneeded pages in
2663                  * shadow object.
2664                  */
2665                 swap_pager_copy(orig_object, new_object, offidxstart, 0);
2666                 vm_object_pip_wakeup(orig_object);
2667         }
2668
2669         /*
2670          * Wakeup the pages we played with.  No spl protection is needed
2671          * for a simple wakeup.
2672          */
2673         for (idx = 0; idx < size; idx++) {
2674                 m = vm_page_lookup(new_object, idx);
2675                 if (m)
2676                         vm_page_wakeup(m);
2677         }
2678
2679         entry->object.vm_object = new_object;
2680         entry->offset = 0LL;
2681         vm_object_deallocate(orig_object);
2682 }
2683
2684 /*
2685  *      vm_map_copy_entry:
2686  *
2687  *      Copies the contents of the source entry to the destination
2688  *      entry.  The entries *must* be aligned properly.
2689  */
2690 static void
2691 vm_map_copy_entry(vm_map_t src_map, vm_map_t dst_map,
2692         vm_map_entry_t src_entry, vm_map_entry_t dst_entry)
2693 {
2694         vm_object_t src_object;
2695
2696         if (dst_entry->maptype == VM_MAPTYPE_SUBMAP)
2697                 return;
2698         if (src_entry->maptype == VM_MAPTYPE_SUBMAP)
2699                 return;
2700
2701         if (src_entry->wired_count == 0) {
2702                 /*
2703                  * If the source entry is marked needs_copy, it is already
2704                  * write-protected.
2705                  */
2706                 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
2707                         pmap_protect(src_map->pmap,
2708                             src_entry->start,
2709                             src_entry->end,
2710                             src_entry->protection & ~VM_PROT_WRITE);
2711                 }
2712
2713                 /*
2714                  * Make a copy of the object.
2715                  */
2716                 if ((src_object = src_entry->object.vm_object) != NULL) {
2717                         if ((src_object->handle == NULL) &&
2718                                 (src_object->type == OBJT_DEFAULT ||
2719                                  src_object->type == OBJT_SWAP)) {
2720                                 vm_object_collapse(src_object);
2721                                 if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) {
2722                                         vm_map_split(src_entry);
2723                                         src_object = src_entry->object.vm_object;
2724                                 }
2725                         }
2726
2727                         vm_object_reference(src_object);
2728                         vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
2729                         dst_entry->object.vm_object = src_object;
2730                         src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
2731                         dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
2732                         dst_entry->offset = src_entry->offset;
2733                 } else {
2734                         dst_entry->object.vm_object = NULL;
2735                         dst_entry->offset = 0;
2736                 }
2737
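                     /*
                      * pmap_copy() may pre-load the destination pmap with the
                      * source's (now read-only) mappings; a pmap implementation
                      * is free to treat this as a no-op.
                      */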
2738                 pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
2739                     dst_entry->end - dst_entry->start, src_entry->start);
2740         } else {
2741                 /*
2742                  * Of course, wired-down pages can't be set copy-on-write.
2743                  * Cause wired pages to be copied into the new map by
2744                  * simulating faults (the new pages are pageable).
2745                  */
2746                 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
2747         }
2748 }
2749
2750 /*
2751  * vmspace_fork:
2752  * Create a new process vmspace structure and vm_map
2753  * based on those of an existing process.  The new map
2754  * is based on the old map, according to the inheritance
2755  * values on the regions in that map.
2756  *
2757  * The source map must not be locked.
2758  */
2759 struct vmspace *
2760 vmspace_fork(struct vmspace *vm1)
2761 {
2762         struct vmspace *vm2;
2763         vm_map_t old_map = &vm1->vm_map;
2764         vm_map_t new_map;
2765         vm_map_entry_t old_entry;
2766         vm_map_entry_t new_entry;
2767         vm_object_t object;
2768         int count;
2769
2770         vm_map_lock(old_map);
2771         old_map->infork = 1;
2772
2773         /*
2774          * XXX Note: upcalls are not copied.
2775          */
2776         vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset);
2777         bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
2778             (caddr_t)&vm1->vm_endcopy - (caddr_t)&vm1->vm_startcopy);
2779         new_map = &vm2->vm_map; /* XXX */
2780         new_map->timestamp = 1;
2781
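             /*
              * Count the existing entries and pre-reserve that many map
              * entries (plus some slop) so the vm_map_entry_create() calls
              * below can be satisfied from the reservation while the old
              * map is locked.
              */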
2782         count = 0;
2783         old_entry = old_map->header.next;
2784         while (old_entry != &old_map->header) {
2785                 ++count;
2786                 old_entry = old_entry->next;
2787         }
2788
2789         count = vm_map_entry_reserve(count + MAP_RESERVE_COUNT);
2790
2791         old_entry = old_map->header.next;
2792         while (old_entry != &old_map->header) {
2793                 if (old_entry->maptype == VM_MAPTYPE_SUBMAP)
2794                         panic("vm_map_fork: encountered a submap");
2795
2796                 switch (old_entry->inheritance) {
2797                 case VM_INHERIT_NONE:
2798                         break;
2799
2800                 case VM_INHERIT_SHARE:
2801                         /*
2802                          * Clone the entry, creating the shared object if
2803                          * necessary.
2804                          */
2805                         object = old_entry->object.vm_object;
2806                         if (object == NULL) {
2807                                 object = vm_object_allocate(OBJT_DEFAULT,
2808                                         atop(old_entry->end - old_entry->start));
2809                                 old_entry->object.vm_object = object;
2810                                 old_entry->offset = (vm_offset_t) 0;
2811                         }
2812
2813                         /*
2814                          * Add the reference before calling vm_object_shadow
2815                          * to insure that a shadow object is created.
2816                          * to ensure that a shadow object is created.
2817                         vm_object_reference(object);
2818                         if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
2819                                 vm_object_shadow(&old_entry->object.vm_object,
2820                                         &old_entry->offset,
2821                                         atop(old_entry->end - old_entry->start));
2822                                 old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
2823                                 /* Transfer the second reference too. */
2824                                 vm_object_reference(
2825                                     old_entry->object.vm_object);
2826                                 vm_object_deallocate(object);
2827                                 object = old_entry->object.vm_object;
2828                         }
2829                         vm_object_clear_flag(object, OBJ_ONEMAPPING);
2830
2831                         /*
2832                          * Clone the entry, referencing the shared object.
2833                          */
2834                         new_entry = vm_map_entry_create(new_map, &count);
2835                         *new_entry = *old_entry;
2836                         new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2837                         new_entry->wired_count = 0;
2838
2839                         /*
2840                          * Insert the entry into the new map -- we know we're
2841                          * inserting at the end of the new map.
2842                          */
2843
2844                         vm_map_entry_link(new_map, new_map->header.prev,
2845                             new_entry);
2846
2847                         /*
2848                          * Update the physical map
2849                          */
2850
2851                         pmap_copy(new_map->pmap, old_map->pmap,
2852                             new_entry->start,
2853                             (old_entry->end - old_entry->start),
2854                             old_entry->start);
2855                         break;
2856
2857                 case VM_INHERIT_COPY:
2858                         /*
2859                          * Clone the entry and link into the map.
2860                          */
2861                         new_entry = vm_map_entry_create(new_map, &count);
2862                         *new_entry = *old_entry;
2863                         new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2864                         new_entry->wired_count = 0;
2865                         new_entry->object.vm_object = NULL;
2866                         vm_map_entry_link(new_map, new_map->header.prev,
2867                             new_entry);
2868                         vm_map_copy_entry(old_map, new_map, old_entry,
2869                             new_entry);
2870                         break;
2871                 }
2872                 old_entry = old_entry->next;
2873         }
2874
2875         new_map->size = old_map->size;
2876         old_map->infork = 0;
2877         vm_map_unlock(old_map);
2878         vm_map_entry_release(count);
2879
2880         return (vm2);
2881 }
2882
2883 int
2884 vm_map_stack (vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
2885               vm_prot_t prot, vm_prot_t max, int cow)
2886 {
2887         vm_map_entry_t prev_entry;
2888         vm_map_entry_t new_stack_entry;
2889         vm_size_t      init_ssize;
2890         int            rv;
2891         int             count;
2892
2893         if (VM_MIN_ADDRESS > 0 && addrbos < VM_MIN_ADDRESS)
2894                 return (KERN_NO_SPACE);
2895
2896         if (max_ssize < sgrowsiz)
2897                 init_ssize = max_ssize;
2898         else
2899                 init_ssize = sgrowsiz;
2900
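             /*
              * Reserve map entries before locking the map so the insert and
              * clip below can draw from the reservation instead of
              * allocating while the map is held.
              */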
2901         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
2902         vm_map_lock(map);
2903
2904         /* If addr is already mapped, no go */
2905         if (vm_map_lookup_entry(map, addrbos, &prev_entry)) {
2906                 vm_map_unlock(map);
2907                 vm_map_entry_release(count);
2908                 return (KERN_NO_SPACE);
2909         }
2910
2911         /* If we would blow our VMEM resource limit, no go */
2912         if (map->size + init_ssize >
2913             curproc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
2914                 vm_map_unlock(map);
2915                 vm_map_entry_release(count);
2916                 return (KERN_NO_SPACE);
2917         }
2918
2919         /* If we can't accommodate max_ssize in the current mapping,
2920          * no go.  However, we need to be aware that subsequent user
2921          * mappings might map into the space we have reserved for
2922          * stack, and currently this space is not protected.  
2923          * 
2924          * Hopefully we will at least detect this condition 
2925          * when we try to grow the stack.
2926          */
2927         if ((prev_entry->next != &map->header) &&
2928             (prev_entry->next->start < addrbos + max_ssize)) {
2929                 vm_map_unlock(map);
2930                 vm_map_entry_release(count);
2931                 return (KERN_NO_SPACE);
2932         }
2933
2934         /* We initially map a stack of only init_ssize.  We will
2935          * grow as needed later.  Since this is to be a grow 
2936          * down stack, we map at the top of the range.
2937          *
2938          * Note: we would normally expect prot and max to be
2939          * VM_PROT_ALL, and cow to be 0.  Possibly we should
2940          * eliminate these as input parameters, and just
2941          * pass these values here in the insert call.
2942          */
2943         rv = vm_map_insert(map, &count,
2944                            NULL, 0, addrbos + max_ssize - init_ssize,
2945                            addrbos + max_ssize,
2946                            VM_MAPTYPE_NORMAL,
2947                            prot, max,
2948                            cow);
2949
2950         /* Now set the avail_ssize amount */
2951         if (rv == KERN_SUCCESS) {
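                     /*
                      * vm_map_insert() may have coalesced the new range into
                      * prev_entry; clip so that prev_entry->next is exactly
                      * the new stack entry before recording avail_ssize.
                      */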
2952                 if (prev_entry != &map->header)
2953                         vm_map_clip_end(map, prev_entry, addrbos + max_ssize - init_ssize, &count);
2954                 new_stack_entry = prev_entry->next;
2955                 if (new_stack_entry->end   != addrbos + max_ssize ||
2956                     new_stack_entry->start != addrbos + max_ssize - init_ssize)
2957                         panic ("Bad entry start/end for new stack entry");
2958                 else 
2959                         new_stack_entry->aux.avail_ssize = max_ssize - init_ssize;
2960         }
2961
2962         vm_map_unlock(map);
2963         vm_map_entry_release(count);
2964         return (rv);
2965 }
2966
2967 /* Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if the
2968  * desired address is already mapped, or if we successfully grow
2969  * the stack.  Also returns KERN_SUCCESS if addr is outside the
2970  * stack range (this is strange, but preserves compatibility with
2971  * the grow function in vm_machdep.c).
2972  */
2973 int
2974 vm_map_growstack (struct proc *p, vm_offset_t addr)
2975 {
2976         vm_map_entry_t prev_entry;
2977         vm_map_entry_t stack_entry;
2978         vm_map_entry_t new_stack_entry;
2979         struct vmspace *vm = p->p_vmspace;
2980         vm_map_t map = &vm->vm_map;
2981         vm_offset_t    end;
2982         int grow_amount;
2983         int rv = KERN_SUCCESS;
2984         int is_procstack;
2985         int use_read_lock = 1;
2986         int count;
2987
2988         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
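             /*
              * Start with a read lock and upgrade to an exclusive lock only
              * when the map must be modified.  A failed upgrade loses the
              * lock entirely, so retry from the top with an exclusive lock.
              */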
2989 Retry:
2990         if (use_read_lock)
2991                 vm_map_lock_read(map);
2992         else
2993                 vm_map_lock(map);
2994
2995         /* If addr is already in the entry range, no need to grow.*/
2996         if (vm_map_lookup_entry(map, addr, &prev_entry))
2997                 goto done;
2998
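             /*
              * 'end' is the lowest address the stack may grow down to: the
              * end of the previous entry, or the bottom of the reserved
              * stack range if nothing precedes the stack entry.
              */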
2999         if ((stack_entry = prev_entry->next) == &map->header)
3000                 goto done;
3001         if (prev_entry == &map->header) 
3002                 end = stack_entry->start - stack_entry->aux.avail_ssize;
3003         else
3004                 end = prev_entry->end;
3005
3006         /* This next test mimics the old grow function in vm_machdep.c.
3007          * It really doesn't quite make sense, but we do it anyway
3008          * for compatibility.
3009          *
3010          * If this is not a growable stack, return success.  This signals
3011          * the caller to proceed as it normally would with ordinary VM.
3012          */
3013         if (stack_entry->aux.avail_ssize < 1 ||
3014             addr >= stack_entry->start ||
3015             addr <  stack_entry->start - stack_entry->aux.avail_ssize) {
3016                 goto done;
3017         } 
3018         
3019         /* Find the minimum grow amount */
3020         grow_amount = roundup (stack_entry->start - addr, PAGE_SIZE);
3021         if (grow_amount > stack_entry->aux.avail_ssize) {
3022                 rv = KERN_NO_SPACE;
3023                 goto done;
3024         }
3025
3026         /* If there is no longer enough space between the entries,
3027          * fail and adjust the available space.  Note: this
3028          * should only happen if the user has mapped into the
3029          * stack area after the stack was created, and is
3030          * probably an error.
3031          *
3032          * This also effectively destroys any guard page the user
3033          * might have intended by limiting the stack size.
3034          */
3035         if (grow_amount > stack_entry->start - end) {
3036                 if (use_read_lock && vm_map_lock_upgrade(map)) {
3037                         use_read_lock = 0;
3038                         goto Retry;
3039                 }
3040                 use_read_lock = 0;
3041                 stack_entry->aux.avail_ssize = stack_entry->start - end;
3042                 rv = KERN_NO_SPACE;
3043                 goto done;
3044         }
3045
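             /*
              * Only the main process stack (addresses at or above
              * vm_maxsaddr) is charged against RLIMIT_STACK and counted in
              * vm_ssize below.
              */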
3046         is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr;
3047
3048         /* If this is the main process stack, see if we're over the 
3049          * stack limit.
3050          */
3051         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
3052                              p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
3053                 rv = KERN_NO_SPACE;
3054                 goto done;
3055         }
3056
3057         /* Round the grow amount up to a multiple of sgrowsiz */
3058         grow_amount = roundup (grow_amount, sgrowsiz);
3059         if (grow_amount > stack_entry->aux.avail_ssize) {
3060                 grow_amount = stack_entry->aux.avail_ssize;
3061         }
3062         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
3063                              p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
3064                 grow_amount = p->p_rlimit[RLIMIT_STACK].rlim_cur -
3065                               ctob(vm->vm_ssize);
3066         }
3067
3068         /* If we would blow our VMEM resource limit, no go */
3069         if (map->size + grow_amount > p->p_rlimit[RLIMIT_VMEM].rlim_cur) {
3070                 rv = KERN_NO_SPACE;
3071                 goto done;
3072         }
3073
3074         if (use_read_lock && vm_map_lock_upgrade(map)) {
3075                 use_read_lock = 0;
3076                 goto Retry;
3077         }
3078         use_read_lock = 0;
3079
3080         /* Get the preliminary new entry start value */
3081         addr = stack_entry->start - grow_amount;
3082
3083         /* If this puts us into the previous entry, cut back our growth
3084          * to the available space.  Also, see the note above.
3085          */
3086         if (addr < end) {
3087                 stack_entry->aux.avail_ssize = stack_entry->start - end;
3088                 addr = end;
3089         }
3090
3091         rv = vm_map_insert(map, &count,
3092                            NULL, 0, addr, stack_entry->start,
3093                            VM_MAPTYPE_NORMAL,
3094                            VM_PROT_ALL, VM_PROT_ALL,
3095                            0);
3096
3097         /* Adjust the available stack space by the amount we grew. */
3098         if (rv == KERN_SUCCESS) {
3099                 if (prev_entry != &map->header)
3100                         vm_map_clip_end(map, prev_entry, addr, &count);
3101                 new_stack_entry = prev_entry->next;
3102                 if (new_stack_entry->end   != stack_entry->start  ||
3103                     new_stack_entry->start != addr)
3104                         panic ("Bad stack grow start/end in new stack entry");
3105                 else {
3106                         new_stack_entry->aux.avail_ssize =
3107                                 stack_entry->aux.avail_ssize -
3108                                 (new_stack_entry->end - new_stack_entry->start);
3109                         if (is_procstack)
3110                                 vm->vm_ssize += btoc(new_stack_entry->end -
3111                                                      new_stack_entry->start);
3112                 }
3113         }
3114
3115 done:
3116         if (use_read_lock)
3117                 vm_map_unlock_read(map);
3118         else
3119                 vm_map_unlock(map);
3120         vm_map_entry_release(count);
3121         return (rv);
3122 }
3123
3124 /*
3125  * Unshare the specified VM space for exec.  If other processes are
3126  * mapped to it, then create a new one.  The new vmspace starts out empty.
3127  */
3128
3129 void
3130 vmspace_exec(struct proc *p, struct vmspace *vmcopy) 
3131 {
3132         struct vmspace *oldvmspace = p->p_vmspace;
3133         struct vmspace *newvmspace;
3134         vm_map_t map = &p->p_vmspace->vm_map;
3135
3136         /*
3137          * If we are execing a resident vmspace we fork it, otherwise
3138          * we create a new vmspace.  Note that exitingcnt and upcalls
3139          * are not copied to the new vmspace.
3140          */
3141         if (vmcopy)  {
3142             newvmspace = vmspace_fork(vmcopy);
3143         } else {
3144             newvmspace = vmspace_alloc(map->min_offset, map->max_offset);
3145             bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy,
3146                 (caddr_t)&oldvmspace->vm_endcopy - 
3147                     (caddr_t)&oldvmspace->vm_startcopy);
3148         }
3149
3150         /*
3151          * This code is written like this for prototype purposes.  The
3152          * goal is to avoid running down the vmspace here, but to let the
3153          * other processes that are still using the vmspace finally
3154          * run it down.  Even though there is little or no chance of blocking
3155          * here, it is a good idea to keep this form for future mods.
3156          */
3157         p->p_vmspace = newvmspace;
3158         pmap_pinit2(vmspace_pmap(newvmspace));
3159         if (p == curproc)
3160                 pmap_activate(p);
3161         vmspace_free(oldvmspace);
3162 }
3163
3164 /*
3165  * Unshare the specified VM space for forcing COW.  This
3166  * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
3167  *
3168  * The exitingcnt test is not strictly necessary but has been
3169  * included for code sanity (to make the code a bit more deterministic).
3170  */
3171
3172 void
3173 vmspace_unshare(struct proc *p) 
3174 {
3175         struct vmspace *oldvmspace = p->p_vmspace;
3176         struct vmspace *newvmspace;
3177
3178         if (oldvmspace->vm_refcnt == 1 && oldvmspace->vm_exitingcnt == 0)
3179                 return;
3180         newvmspace = vmspace_fork(oldvmspace);
3181         p->p_vmspace = newvmspace;
3182         pmap_pinit2(vmspace_pmap(newvmspace));
3183         if (p == curproc)
3184                 pmap_activate(p);
3185         vmspace_free(oldvmspace);
3186 }
3187
3188 /*
3189  *      vm_map_lookup:
3190  *
3191  *      Finds the VM object, offset, and
3192  *      protection for a given virtual address in the
3193  *      specified map, assuming a page fault of the
3194  *      type specified.
3195  *
3196  *      Leaves the map in question locked for read; return
3197  *      values are guaranteed until a vm_map_lookup_done
3198  *      call is performed.  Note that the map argument
3199  *      is in/out; the returned map must be used in
3200  *      the call to vm_map_lookup_done.
3201  *
3202  *      A handle (out_entry) is returned for use in
3203  *      vm_map_lookup_done, to make that fast.
3204  *
3205  *      If a lookup is requested with "write protection"
3206  *      specified, the map may be changed to perform virtual
3207  *      copying operations, although the data referenced will
3208  *      remain the same.
3209  */
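
/*
 *      Illustrative sketch of a hypothetical caller (the real callers live
 *      in the fault code; the variable names below are examples only):
 *
 *              vm_map_t map = &curproc->p_vmspace->vm_map;
 *              vm_map_entry_t entry;
 *              vm_object_t object;
 *              vm_pindex_t pindex;
 *              vm_prot_t prot;
 *              boolean_t wired;
 *
 *              if (vm_map_lookup(&map, vaddr, VM_PROT_READ, &entry, &object,
 *                                &pindex, &prot, &wired) == KERN_SUCCESS) {
 *                      (use object and pindex while the map stays read-locked)
 *                      vm_map_lookup_done(map, entry, 0);
 *              }
 */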
3210 int
3211 vm_map_lookup(vm_map_t *var_map,                /* IN/OUT */
3212               vm_offset_t vaddr,
3213               vm_prot_t fault_typea,
3214               vm_map_entry_t *out_entry,        /* OUT */
3215               vm_object_t *object,              /* OUT */
3216               vm_pindex_t *pindex,              /* OUT */
3217               vm_prot_t *out_prot,              /* OUT */
3218               boolean_t *wired)                 /* OUT */
3219 {
3220         vm_map_entry_t entry;
3221         vm_map_t map = *var_map;
3222         vm_prot_t prot;
3223         vm_prot_t fault_type = fault_typea;
3224         int use_read_lock = 1;
3225         int rv = KERN_SUCCESS;
3226
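             /*
              * Same read-lock / upgrade-on-demand pattern as in
              * vm_map_growstack(): a failed upgrade loses the lock, so we
              * retry holding an exclusive lock.
              */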
3227 RetryLookup:
3228         if (use_read_lock)
3229                 vm_map_lock_read(map);
3230         else
3231                 vm_map_lock(map);
3232
3233         /*
3234          * If the map has an interesting hint, try it before calling the
3235          * full-blown lookup routine.
3236          */
3237         entry = map->hint;
3238         *out_entry = entry;
3239
3240         if ((entry == &map->header) ||
3241             (vaddr < entry->start) || (vaddr >= entry->end)) {
3242                 vm_map_entry_t tmp_entry;
3243
3244                 /*
3245                  * Entry was either not a valid hint, or the vaddr was not
3246                  * contained in the entry, so do a full lookup.
3247                  */
3248                 if (!vm_map_lookup_entry(map, vaddr, &tmp_entry)) {
3249                         rv = KERN_INVALID_ADDRESS;
3250                         goto done;
3251                 }
3252
3253                 entry = tmp_entry;
3254                 *out_entry = entry;
3255         }
3256         
3257         /*
3258          * Handle submaps.
3259          */
3260         if (entry->maptype == VM_MAPTYPE_SUBMAP) {
3261                 vm_map_t old_map = map;
3262
3263                 *var_map = map = entry->object.sub_map;
3264                 if (use_read_lock)
3265                         vm_map_unlock_read(old_map);
3266                 else
3267                         vm_map_unlock(old_map);
3268                 use_read_lock = 1;
3269                 goto RetryLookup;
3270         }
3271
3272         /*
3273          * Check whether this task is allowed to have this page.
3274          * Note the special case for MAP_ENTRY_COW
3275          * pages with an override.  This is to implement a forced
3276          * COW for debuggers.
3277          */
3278
3279         if (fault_type & VM_PROT_OVERRIDE_WRITE)
3280                 prot = entry->max_protection;
3281         else
3282                 prot = entry->protection;
3283
3284         fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
3285         if ((fault_type & prot) != fault_type) {
3286                 rv = KERN_PROTECTION_FAILURE;
3287                 goto done;
3288         }
3289
3290         if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
3291             (entry->eflags & MAP_ENTRY_COW) &&
3292             (fault_type & VM_PROT_WRITE) &&
3293             (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) {
3294                 rv = KERN_PROTECTION_FAILURE;
3295                 goto done;
3296         }
3297
3298         /*
3299          * If this page is not pageable, we have to get it for all possible
3300          * accesses.
3301          */
3302         *wired = (entry->wired_count != 0);
3303         if (*wired)
3304                 prot = fault_type = entry->protection;
3305
3306         /*
3307          * Virtual page tables may need to update the accessed (A) bit
3308          * in a page table entry.  Upgrade the fault to a write fault for
3309          * that case if the map will support it.  If the map does not support
3310          * it the page table entry simply will not be updated.
3311          */
3312         if (entry->maptype == VM_MAPTYPE_VPAGETABLE) {
3313                 if (prot & VM_PROT_WRITE)
3314                         fault_type |= VM_PROT_WRITE;
3315         }
3316
3317         /*
3318          * If the entry was copy-on-write, either copy it now or demote the permissions.
3319          */
3320         if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
3321                 /*
3322                  * If we want to write the page, we may as well handle that
3323                  * now since we've got the map locked.
3324                  *
3325                  * If we don't need to write the page, we just demote the
3326                  * permissions allowed.
3327                  */
3328
3329                 if (fault_type & VM_PROT_WRITE) {
3330                         /*
3331                          * Make a new object, and place it in the object
3332                          * chain.  Note that no new references have appeared
3333                          * -- one just moved from the map to the new
3334                          * object.
3335                          */
3336
3337                         if (use_read_lock && vm_map_lock_upgrade(map)) {
3338                                 use_read_lock = 0;
3339                                 goto RetryLookup;
3340                         }
3341                         use_read_lock = 0;
3342
3343                         vm_object_shadow(
3344                             &entry->object.vm_object,
3345                             &entry->offset,
3346                             atop(entry->end - entry->start));
3347
3348                         entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
3349                 } else {
3350                         /*
3351                          * We're attempting to read a copy-on-write page --
3352                          * don't allow writes.
3353                          */
3354
3355                         prot &= ~VM_PROT_WRITE;
3356                 }
3357         }
3358
3359         /*
3360          * Create an object if necessary.
3361          */
3362         if (entry->object.vm_object == NULL &&
3363             !map->system_map) {
3364                 if (use_read_lock && vm_map_lock_upgrade(map))  {
3365                         use_read_lock = 0;
3366                         goto RetryLookup;
3367                 }
3368                 use_read_lock = 0;
3369                 entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
3370                     atop(entry->end - entry->start));
3371                 entry->offset = 0;
3372         }
3373
3374         /*
3375          * Return the object/offset from this entry.  If the entry was
3376          * copy-on-write or empty, it has been fixed up.
3377          */
3378
3379         *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
3380         *object = entry->object.vm_object;
3381
3382         /*
3383          * Return the computed protection.  On
3384          * success we return with a read lock held on the map.  On failure
3385          * we return with the map unlocked.
3386          */
3387         *out_prot = prot;
3388 done:
3389         if (rv == KERN_SUCCESS) {
3390                 if (use_read_lock == 0)
3391                         vm_map_lock_downgrade(map);
3392         } else if (use_read_lock) {
3393                 vm_map_unlock_read(map);
3394         } else {
3395                 vm_map_unlock(map);
3396         }
3397         return (rv);
3398 }
3399
3400 /*
3401  *      vm_map_lookup_done:
3402  *
3403  *      Releases locks acquired by a vm_map_lookup
3404  *      (according to the handle returned by that lookup).
3405  */
3406
3407 void
3408 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry, int count)
3409 {
3410         /*
3411          * Unlock the main-level map
3412          */
3413         vm_map_unlock_read(map);
3414         if (count)
3415                 vm_map_entry_release(count);
3416 }
3417
3418 #include "opt_ddb.h"
3419 #ifdef DDB
3420 #include <sys/kernel.h>
3421
3422 #include <ddb/ddb.h>
3423
3424 /*
3425  *      vm_map_print:   [ debug ]
3426  */
3427 DB_SHOW_COMMAND(map, vm_map_print)
3428 {
3429         static int nlines;
3430         /* XXX convert args. */
3431         vm_map_t map = (vm_map_t)addr;
3432         boolean_t full = have_addr;
3433
3434         vm_map_entry_t entry;
3435
3436         db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
3437             (void *)map,
3438             (void *)map->pmap, map->nentries, map->timestamp);
3439         nlines++;
3440
3441         if (!full && db_indent)
3442                 return;
3443
3444         db_indent += 2;
3445         for (entry = map->header.next; entry != &map->header;
3446             entry = entry->next) {
3447                 db_iprintf("map entry %p: start=%p, end=%p\n",
3448                     (void *)entry, (void *)entry->start, (void *)entry->end);
3449                 nlines++;
3450                 {
3451                         static char *inheritance_name[4] =
3452                         {"share", "copy", "none", "donate_copy"};
3453
3454                         db_iprintf(" prot=%x/%x/%s",
3455                             entry->protection,
3456                             entry->max_protection,
3457                             inheritance_name[(int)(unsigned char)entry->inheritance]);
3458                         if (entry->wired_count != 0)
3459                                 db_printf(", wired");
3460                 }
3461                 if (entry->maptype == VM_MAPTYPE_SUBMAP) {
3462                         /* XXX no %qd in kernel.  Truncate entry->offset. */
3463                         db_printf(", share=%p, offset=0x%lx\n",
3464                             (void *)entry->object.sub_map,
3465                             (long)entry->offset);
3466                         nlines++;
3467                         if ((entry->prev == &map->header) ||
3468                             (entry->prev->object.sub_map !=
3469                                 entry->object.sub_map)) {
3470                                 db_indent += 2;
3471                                 vm_map_print((db_expr_t)(intptr_t)
3472                                              entry->object.sub_map,
3473                                              full, 0, (char *)0);
3474                                 db_indent -= 2;
3475                         }
3476                 } else {
3477                         /* XXX no %qd in kernel.  Truncate entry->offset. */
3478                         db_printf(", object=%p, offset=0x%lx",
3479                             (void *)entry->object.vm_object,
3480                             (long)entry->offset);
3481                         if (entry->eflags & MAP_ENTRY_COW)
3482                                 db_printf(", copy (%s)",
3483                                     (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
3484                         db_printf("\n");
3485                         nlines++;
3486
3487                         if ((entry->prev == &map->header) ||
3488                             (entry->prev->object.vm_object !=
3489                                 entry->object.vm_object)) {
3490                                 db_indent += 2;
3491                                 vm_object_print((db_expr_t)(intptr_t)
3492                                                 entry->object.vm_object,
3493                                                 full, 0, (char *)0);
3494                                 nlines += 4;
3495                                 db_indent -= 2;
3496                         }
3497                 }
3498         }
3499         db_indent -= 2;
3500         if (db_indent == 0)
3501                 nlines = 0;
3502 }
3503
3504
3505 DB_SHOW_COMMAND(procvm, procvm)
3506 {
3507         struct proc *p;
3508
3509         if (have_addr) {
3510                 p = (struct proc *) addr;
3511         } else {
3512                 p = curproc;
3513         }
3514
3515         db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
3516             (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
3517             (void *)vmspace_pmap(p->p_vmspace));
3518
3519         vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL);
3520 }
3521
3522 #endif /* DDB */