Add an alignment feature to vm_map_findspace(). This feature will be used
[dragonfly.git] / sys / vm / vm_map.c
1 /*
2  * Copyright (c) 1991, 1993
3  *      The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * The Mach Operating System project at Carnegie-Mellon University.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *      This product includes software developed by the University of
19  *      California, Berkeley and its contributors.
20  * 4. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *      from: @(#)vm_map.c      8.3 (Berkeley) 1/12/94
37  *
38  *
39  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40  * All rights reserved.
41  *
42  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
43  *
44  * Permission to use, copy, modify and distribute this software and
45  * its documentation is hereby granted, provided that both the copyright
46  * notice and this permission notice appear in all copies of the
47  * software, derivative works or modified versions, and any portions
48  * thereof, and that both notices appear in supporting documentation.
49  *
50  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53  *
54  * Carnegie Mellon requests users of this software to return to
55  *
56  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
57  *  School of Computer Science
58  *  Carnegie Mellon University
59  *  Pittsburgh PA 15213-3890
60  *
61  * any improvements or extensions that they make and grant Carnegie the
62  * rights to redistribute these changes.
63  *
64  * $FreeBSD: src/sys/vm/vm_map.c,v 1.187.2.19 2003/05/27 00:47:02 alc Exp $
65  * $DragonFly: src/sys/vm/vm_map.c,v 1.9 2003/08/25 17:01:13 dillon Exp $
66  */
67
68 /*
69  *      Virtual memory mapping module.
70  */
71
72 #include <sys/param.h>
73 #include <sys/systm.h>
74 #include <sys/proc.h>
75 #include <sys/lock.h>
76 #include <sys/vmmeter.h>
77 #include <sys/mman.h>
78 #include <sys/vnode.h>
79 #include <sys/resourcevar.h>
80 #include <sys/shm.h>
81
82 #include <vm/vm.h>
83 #include <vm/vm_param.h>
84 #include <vm/pmap.h>
85 #include <vm/vm_map.h>
86 #include <vm/vm_page.h>
87 #include <vm/vm_object.h>
88 #include <vm/vm_pager.h>
89 #include <vm/vm_kern.h>
90 #include <vm/vm_extern.h>
91 #include <vm/swap_pager.h>
92 #include <vm/vm_zone.h>
93
94 /*
95  *      Virtual memory maps provide for the mapping, protection,
96  *      and sharing of virtual memory objects.  In addition,
97  *      this module provides for an efficient virtual copy of
98  *      memory from one map to another.
99  *
100  *      Synchronization is required prior to most operations.
101  *
102  *      Maps consist of an ordered doubly-linked list of simple
103  *      entries; a single hint is used to speed up lookups.
104  *
105  *      Since portions of maps are specified by start/end addresses,
106  *      which may not align with existing map entries, all
107  *      routines merely "clip" entries to these start/end values.
108  *      [That is, an entry is split into two, bordering at a
109  *      start or end value.]  Note that these clippings may not
110  *      always be necessary (as the two resulting entries are then
111  *      not changed); however, the clipping is done for convenience.
112  *
113  *      As mentioned above, virtual copy operations are performed
114  *      by copying VM object references from one map to
115  *      another, and then marking both regions as copy-on-write.
116  */
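/*
 * Illustrative note (not in the original source): as an example of the
 * clipping described above, an entry covering [A, C) that is clipped at an
 * address B, with A < B < C, becomes two adjacent entries [A, B) and [B, C)
 * referencing the same backing object, with the second entry's offset
 * advanced by (B - A).  Neither the mapped pages nor their protections
 * change; only the map bookkeeping is split.
 */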
117
118 /*
119  *      vm_map_startup:
120  *
121  *      Initialize the vm_map module.  Must be called before
122  *      any other vm_map routines.
123  *
124  *      Map and entry structures are allocated from the general
125  *      purpose memory pool with some exceptions:
126  *
127  *      - The kernel map and kmem submap are allocated statically.
128  *      - Kernel map entries are allocated out of a static pool.
129  *
130  *      These restrictions are necessary since malloc() uses the
131  *      maps and requires map entries.
132  */
133
134 static struct vm_zone kmapentzone_store, mapentzone_store, mapzone_store;
135 static vm_zone_t mapentzone, kmapentzone, mapzone, vmspace_zone;
136 static struct vm_object kmapentobj, mapentobj, mapobj;
137
138 static struct vm_map_entry map_entry_init[MAX_MAPENT];
139 static struct vm_map_entry kmap_entry_init[MAX_KMAPENT];
140 static struct vm_map map_init[MAX_KMAP];
141
142 static void _vm_map_clip_end (vm_map_t, vm_map_entry_t, vm_offset_t);
143 static void _vm_map_clip_start (vm_map_t, vm_map_entry_t, vm_offset_t);
144 static vm_map_entry_t vm_map_entry_create (vm_map_t);
145 static void vm_map_entry_delete (vm_map_t, vm_map_entry_t);
146 static void vm_map_entry_dispose (vm_map_t, vm_map_entry_t);
147 static void vm_map_entry_unwire (vm_map_t, vm_map_entry_t);
148 static void vm_map_copy_entry (vm_map_t, vm_map_t, vm_map_entry_t,
149                 vm_map_entry_t);
150 static void vm_map_split (vm_map_entry_t);
151 static void vm_map_unclip_range (vm_map_t map, vm_map_entry_t start_entry, vm_offset_t start, vm_offset_t end, int flags);
152
153 void
154 vm_map_startup()
155 {
156         mapzone = &mapzone_store;
157         zbootinit(mapzone, "MAP", sizeof (struct vm_map),
158                 map_init, MAX_KMAP);
159         kmapentzone = &kmapentzone_store;
160         zbootinit(kmapentzone, "KMAP ENTRY", sizeof (struct vm_map_entry),
161                 kmap_entry_init, MAX_KMAPENT);
162         mapentzone = &mapentzone_store;
163         zbootinit(mapentzone, "MAP ENTRY", sizeof (struct vm_map_entry),
164                 map_entry_init, MAX_MAPENT);
165 }
166
167 /*
168  * Allocate a vmspace structure, including a vm_map and pmap,
169  * and initialize those structures.  The refcnt is set to 1.
170  * The remaining fields must be initialized by the caller.
171  */
172 struct vmspace *
173 vmspace_alloc(min, max)
174         vm_offset_t min, max;
175 {
176         struct vmspace *vm;
177
178         vm = zalloc(vmspace_zone);
179         vm_map_init(&vm->vm_map, min, max);
180         pmap_pinit(vmspace_pmap(vm));
181         vm->vm_map.pmap = vmspace_pmap(vm);             /* XXX */
182         vm->vm_refcnt = 1;
183         vm->vm_shm = NULL;
184         vm->vm_exitingcnt = 0;
185         return (vm);
186 }
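/*
 * Illustrative sketch (not in the original source): a hypothetical caller
 * creating a fresh vmspace fills in the fields this routine leaves
 * untouched, roughly as follows.  The bounds and field names shown are
 * only an example.
 *
 *      struct vmspace *vm;
 *
 *      vm = vmspace_alloc(VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
 *      vm->vm_taddr = ...;     (text base, caller supplied)
 *      vm->vm_daddr = ...;     (data base, caller supplied)
 */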
187
188 void
189 vm_init2(void) {
190         zinitna(kmapentzone, &kmapentobj,
191                 NULL, 0, lmin((VM_MAX_KERNEL_ADDRESS - KERNBASE) / PAGE_SIZE,
192                 vmstats.v_page_count) / 8, ZONE_INTERRUPT, 1);
193         zinitna(mapentzone, &mapentobj,
194                 NULL, 0, 0, 0, 1);
195         zinitna(mapzone, &mapobj,
196                 NULL, 0, 0, 0, 1);
197         vmspace_zone = zinit("VMSPACE", sizeof (struct vmspace), 0, 0, 3);
198         pmap_init2();
199         vm_object_init2();
200 }
201
202 static __inline void
203 vmspace_dofree(struct vmspace *vm)
204 {
205         /*
206          * Make sure any SysV shm is freed; it might not have been
207          * freed in exit1().
208          */
209         shmexit(vm);
210
211         /*
212          * Lock the map, to wait out all other references to it.
213          * Delete all of the mappings and pages they hold, then call
214          * the pmap module to reclaim anything left.
215          */
216         vm_map_lock(&vm->vm_map);
217         (void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
218             vm->vm_map.max_offset);
219         vm_map_unlock(&vm->vm_map);
220
221         pmap_release(vmspace_pmap(vm));
222         zfree(vmspace_zone, vm);
223 }
224
225 void
226 vmspace_free(struct vmspace *vm)
227 {
228         if (vm->vm_refcnt == 0)
229                 panic("vmspace_free: attempt to free already freed vmspace");
230
231         if (--vm->vm_refcnt == 0 && vm->vm_exitingcnt == 0)
232                 vmspace_dofree(vm);
233 }
234
235 void
236 vmspace_exitfree(struct proc *p)
237 {
238         struct vmspace *vm;
239
240         vm = p->p_vmspace;
241         p->p_vmspace = NULL;
242
243         /*
244          * cleanup by parent process wait()ing on exiting child.  vm_refcnt
245          * may not be 0 (e.g. fork() and child exits without exec()ing).
246          * exitingcnt may increment above 0 and drop back down to zero
247          * several times while vm_refcnt is held non-zero.  vm_refcnt
248          * may also increment above 0 and drop back down to zero several
249          * times while vm_exitingcnt is held non-zero.
250          *
251          * The last wait on the exiting child's vmspace will clean up
252          * the remainder of the vmspace.
253          */
254         if (--vm->vm_exitingcnt == 0 && vm->vm_refcnt == 0)
255                 vmspace_dofree(vm);
256 }
257
258 /*
259  * vmspace_swap_count() - count the approximate swap usage in pages for a
260  *                        vmspace.
261  *
262  *      Swap usage is determined by taking the proportional swap used by
263  *      VM objects backing the VM map.  To make up for fractional losses,
264  *      if the VM object has any swap use at all the associated map entries
265  *      count for at least 1 swap page.
266  */
267 int
268 vmspace_swap_count(struct vmspace *vmspace)
269 {
270         vm_map_t map = &vmspace->vm_map;
271         vm_map_entry_t cur;
272         int count = 0;
273
274         for (cur = map->header.next; cur != &map->header; cur = cur->next) {
275                 vm_object_t object;
276
277                 if ((cur->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
278                     (object = cur->object.vm_object) != NULL &&
279                     object->type == OBJT_SWAP
280                 ) {
281                         int n = (cur->end - cur->start) / PAGE_SIZE;
282
283                         if (object->un_pager.swp.swp_bcount) {
284                                 count += object->un_pager.swp.swp_bcount *
285                                     SWAP_META_PAGES * n / object->size + 1;
286                         }
287                 }
288         }
289         return(count);
290 }
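/*
 * Illustrative note (not in the original source): a worked example of the
 * proportional swap charge above, assuming SWAP_META_PAGES is 16.  For a
 * 4-page map entry backed by a 64-page OBJT_SWAP object with a swp_bcount
 * of 2, the charge is
 *
 *      2 * 16 * 4 / 64 + 1 = 3 pages
 *
 * The trailing "+ 1" ensures an object with any swap use at all is charged
 * at least one page per map entry that references it.
 */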
291
292
293 /*
294  *      vm_map_create:
295  *
296  *      Creates and returns a new empty VM map with
297  *      the given physical map structure, and having
298  *      the given lower and upper address bounds.
299  */
300 vm_map_t
301 vm_map_create(pmap, min, max)
302         pmap_t pmap;
303         vm_offset_t min, max;
304 {
305         vm_map_t result;
306
307         result = zalloc(mapzone);
308         vm_map_init(result, min, max);
309         result->pmap = pmap;
310         return (result);
311 }
312
313 /*
314  * Initialize an existing vm_map structure
315  * such as that in the vmspace structure.
316  * The pmap is set elsewhere.
317  */
318 void
319 vm_map_init(map, min, max)
320         struct vm_map *map;
321         vm_offset_t min, max;
322 {
323         map->header.next = map->header.prev = &map->header;
324         map->nentries = 0;
325         map->size = 0;
326         map->system_map = 0;
327         map->infork = 0;
328         map->min_offset = min;
329         map->max_offset = max;
330         map->first_free = &map->header;
331         map->hint = &map->header;
332         map->timestamp = 0;
333         lockinit(&map->lock, 0, "thrd_sleep", 0, LK_NOPAUSE);
334 }
335
336 /*
337  *      vm_map_entry_create:    [ internal use only ]
338  *
339  *      Allocates a VM map entry for insertion.  No entry fields are filled 
340  *      in.  This routine may be called from an interrupt.
341  */
342 static vm_map_entry_t
343 vm_map_entry_create(map)
344         vm_map_t map;
345 {
346         vm_map_entry_t new_entry;
347
348         if (map->system_map || !mapentzone)
349                 new_entry = zalloc(kmapentzone);
350         else
351                 new_entry = zalloc(mapentzone);
352         if (new_entry == NULL)
353                 panic("vm_map_entry_create: kernel resources exhausted");
354         return(new_entry);
355 }
356
357 /*
358  *      vm_map_entry_dispose:   [ internal use only ]
359  *
360  *      Dispose of a vm_map_entry that is no longer being referenced.  This
361  *      function may be called from an interrupt.
362  */
363 static void
364 vm_map_entry_dispose(map, entry)
365         vm_map_t map;
366         vm_map_entry_t entry;
367 {
368         if (map->system_map || !mapentzone)
369                 zfree(kmapentzone, entry);
370         else
371                 zfree(mapentzone, entry);
372 }
373
374
375 /*
376  *      vm_map_entry_{un,}link:
377  *
378  *      Insert/remove entries from maps.
379  */
380 static __inline void
381 vm_map_entry_link(vm_map_t map,
382                   vm_map_entry_t after_where,
383                   vm_map_entry_t entry)
384 {
385         map->nentries++;
386         entry->prev = after_where;
387         entry->next = after_where->next;
388         entry->next->prev = entry;
389         after_where->next = entry;
390 }
391
392 static __inline void
393 vm_map_entry_unlink(vm_map_t map,
394                     vm_map_entry_t entry)
395 {
396         vm_map_entry_t prev;
397         vm_map_entry_t next;
398
399         if (entry->eflags & MAP_ENTRY_IN_TRANSITION)
400                 panic("vm_map_entry_unlink: attempt to mess with locked entry! %p", entry);
401         prev = entry->prev;
402         next = entry->next;
403         next->prev = prev;
404         prev->next = next;
405         map->nentries--;
406 }
407
408 /*
409  *      SAVE_HINT:
410  *
411  *      Saves the specified entry as the hint for
412  *      future lookups.
413  */
414 #define SAVE_HINT(map,value) \
415                 (map)->hint = (value);
416
417 /*
418  *      vm_map_lookup_entry:    [ internal use only ]
419  *
420  *      Finds the map entry containing (or
421  *      immediately preceding) the specified address
422  *      in the given map; the entry is returned
423  *      in the "entry" parameter.  The boolean
424  *      result indicates whether the address is
425  *      actually contained in the map.
426  */
427 boolean_t
428 vm_map_lookup_entry(map, address, entry)
429         vm_map_t map;
430         vm_offset_t address;
431         vm_map_entry_t *entry;  /* OUT */
432 {
433         vm_map_entry_t cur;
434         vm_map_entry_t last;
435
436         /*
437          * Start looking either from the head of the list, or from the hint.
438          */
439
440         cur = map->hint;
441
442         if (cur == &map->header)
443                 cur = cur->next;
444
445         if (address >= cur->start) {
446                 /*
447                  * Go from hint to end of list.
448                  *
449                  * But first, make a quick check to see if we are already looking
450                  * at the entry we want (which is usually the case). Note also
451                  * that we don't need to save the hint here... it is the same
452                  * hint (unless we are at the header, in which case the hint
453                  * didn't buy us anything anyway).
454                  */
455                 last = &map->header;
456                 if ((cur != last) && (cur->end > address)) {
457                         *entry = cur;
458                         return (TRUE);
459                 }
460         } else {
461                 /*
462                  * Go from start to hint, *inclusively*
463                  */
464                 last = cur->next;
465                 cur = map->header.next;
466         }
467
468         /*
469          * Search linearly
470          */
471
472         while (cur != last) {
473                 if (cur->end > address) {
474                         if (address >= cur->start) {
475                                 /*
476                                  * Save this lookup for future hints, and
477                                  * return
478                                  */
479
480                                 *entry = cur;
481                                 SAVE_HINT(map, cur);
482                                 return (TRUE);
483                         }
484                         break;
485                 }
486                 cur = cur->next;
487         }
488         *entry = cur->prev;
489         SAVE_HINT(map, *entry);
490         return (FALSE);
491 }
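/*
 * Illustrative sketch (not in the original source): callers typically use
 * the boolean result to distinguish "address inside an entry" from "address
 * in the gap following the returned predecessor":
 *
 *      vm_map_entry_t entry;
 *
 *      if (vm_map_lookup_entry(map, addr, &entry)) {
 *              (here entry->start <= addr && addr < entry->end)
 *      } else {
 *              (addr lies after entry, which may be &map->header)
 *      }
 */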
492
493 /*
494  *      vm_map_insert:
495  *
496  *      Inserts the given whole VM object into the target
497  *      map at the specified address range.  The object's
498  *      size should match that of the address range.
499  *
500  *      Requires that the map be locked, and leaves it so.
501  *
502  *      If object is non-NULL, ref count must be bumped by caller
503  *      prior to making call to account for the new entry.
504  */
505 int
506 vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
507               vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max,
508               int cow)
509 {
510         vm_map_entry_t new_entry;
511         vm_map_entry_t prev_entry;
512         vm_map_entry_t temp_entry;
513         vm_eflags_t protoeflags;
514
515         /*
516          * Check that the start and end points are not bogus.
517          */
518
519         if ((start < map->min_offset) || (end > map->max_offset) ||
520             (start >= end))
521                 return (KERN_INVALID_ADDRESS);
522
523         /*
524          * Find the entry prior to the proposed starting address; if it's part
525          * of an existing entry, this range is bogus.
526          */
527
528         if (vm_map_lookup_entry(map, start, &temp_entry))
529                 return (KERN_NO_SPACE);
530
531         prev_entry = temp_entry;
532
533         /*
534          * Assert that the next entry doesn't overlap the end point.
535          */
536
537         if ((prev_entry->next != &map->header) &&
538             (prev_entry->next->start < end))
539                 return (KERN_NO_SPACE);
540
541         protoeflags = 0;
542
543         if (cow & MAP_COPY_ON_WRITE)
544                 protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY;
545
546         if (cow & MAP_NOFAULT) {
547                 protoeflags |= MAP_ENTRY_NOFAULT;
548
549                 KASSERT(object == NULL,
550                         ("vm_map_insert: paradoxical MAP_NOFAULT request"));
551         }
552         if (cow & MAP_DISABLE_SYNCER)
553                 protoeflags |= MAP_ENTRY_NOSYNC;
554         if (cow & MAP_DISABLE_COREDUMP)
555                 protoeflags |= MAP_ENTRY_NOCOREDUMP;
556
557         if (object) {
558                 /*
559                  * When object is non-NULL, it could be shared with another
560                  * process.  We have to set or clear OBJ_ONEMAPPING 
561                  * appropriately.
562                  */
563                 if ((object->ref_count > 1) || (object->shadow_count != 0)) {
564                         vm_object_clear_flag(object, OBJ_ONEMAPPING);
565                 }
566         }
567         else if ((prev_entry != &map->header) &&
568                  (prev_entry->eflags == protoeflags) &&
569                  (prev_entry->end == start) &&
570                  (prev_entry->wired_count == 0) &&
571                  ((prev_entry->object.vm_object == NULL) ||
572                   vm_object_coalesce(prev_entry->object.vm_object,
573                                      OFF_TO_IDX(prev_entry->offset),
574                                      (vm_size_t)(prev_entry->end - prev_entry->start),
575                                      (vm_size_t)(end - prev_entry->end)))) {
576                 /*
577                  * We were able to extend the object.  Determine if we
578                  * can extend the previous map entry to include the 
579                  * new range as well.
580                  */
581                 if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
582                     (prev_entry->protection == prot) &&
583                     (prev_entry->max_protection == max)) {
584                         map->size += (end - prev_entry->end);
585                         prev_entry->end = end;
586                         vm_map_simplify_entry(map, prev_entry);
587                         return (KERN_SUCCESS);
588                 }
589
590                 /*
591                  * If we can extend the object but cannot extend the
592                  * map entry, we have to create a new map entry.  We
593                  * must bump the ref count on the extended object to
594                  * account for it.  object may be NULL.
595                  */
596                 object = prev_entry->object.vm_object;
597                 offset = prev_entry->offset +
598                         (prev_entry->end - prev_entry->start);
599                 vm_object_reference(object);
600         }
601
602         /*
603          * NOTE: if conditionals fail, object can be NULL here.  This occurs
604          * in things like the buffer map where we manage kva but do not manage
605          * backing objects.
606          */
607
608         /*
609          * Create a new entry
610          */
611
612         new_entry = vm_map_entry_create(map);
613         new_entry->start = start;
614         new_entry->end = end;
615
616         new_entry->eflags = protoeflags;
617         new_entry->object.vm_object = object;
618         new_entry->offset = offset;
619         new_entry->avail_ssize = 0;
620
621         new_entry->inheritance = VM_INHERIT_DEFAULT;
622         new_entry->protection = prot;
623         new_entry->max_protection = max;
624         new_entry->wired_count = 0;
625
626         /*
627          * Insert the new entry into the list
628          */
629
630         vm_map_entry_link(map, prev_entry, new_entry);
631         map->size += new_entry->end - new_entry->start;
632
633         /*
634          * Update the free space hint
635          */
636         if ((map->first_free == prev_entry) &&
637             (prev_entry->end >= new_entry->start)) {
638                 map->first_free = new_entry;
639         }
640
641 #if 0
642         /*
643          * Temporarily removed to avoid MAP_STACK panic, due to
644          * MAP_STACK being a huge hack.  Will be added back in
645          * when MAP_STACK (and the user stack mapping) is fixed.
646          */
647         /*
648          * It may be possible to simplify the entry
649          */
650         vm_map_simplify_entry(map, new_entry);
651 #endif
652
653         if (cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) {
654                 pmap_object_init_pt(map->pmap, start,
655                                     object, OFF_TO_IDX(offset), end - start,
656                                     cow & MAP_PREFAULT_PARTIAL);
657         }
658
659         return (KERN_SUCCESS);
660 }
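/*
 * Illustrative sketch (not in the original source): because vm_map_insert()
 * expects the caller to account for the new reference on a non-NULL object,
 * a typical call sequence looks roughly like this:
 *
 *      if (object != NULL)
 *              vm_object_reference(object);
 *      rv = vm_map_insert(map, object, offset, start, start + size,
 *              prot, max, cow);
 *      if (rv != KERN_SUCCESS && object != NULL)
 *              vm_object_deallocate(object);
 *
 * The error handling shown is only a sketch; real callers differ.
 */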
661
662 /*
663  * Find sufficient space for `length' bytes in the given map, starting at
664  * `start'.  The map must be locked.  Returns 0 on success, 1 on no space.
665  *
666  * This function will return an arbitrarily aligned pointer.  If no
667  * particular alignment is required you should pass align as 1.  Note that
668  * the map may return PAGE_SIZE aligned pointers if all the lengths used in
669  * the map are a multiple of PAGE_SIZE, even if you pass a smaller align
670  * argument.
671  *
672  * 'align' should be a power of 2 but is not required to be.
673  */
674 int
675 vm_map_findspace(
676         vm_map_t map,
677         vm_offset_t start,
678         vm_size_t length,
679         vm_offset_t align,
680         vm_offset_t *addr)
681 {
682         vm_map_entry_t entry, next;
683         vm_offset_t end;
684         vm_offset_t align_mask;
685
686         if (start < map->min_offset)
687                 start = map->min_offset;
688         if (start > map->max_offset)
689                 return (1);
690
691         /*
692          * If the alignment is not a power of 2 we will have to use
693          * a mod/division, so set align_mask to a sentinel value.
694          */
695         if ((align | (align - 1)) + 1 != (align << 1))
696                 align_mask = (vm_offset_t)-1;
697         else
698                 align_mask = align - 1;
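        /*
         * (Added explanatory note) For a power of 2 such as align == 8,
         * (8 | 7) + 1 == 16 == (8 << 1), so the test above takes the cheap
         * masking path with align_mask == 0x7; any non-power-of-2 value
         * falls back to the division path instead.
         */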
699
700         /*
701          * Look for the first possible address; if there's already something
702          * at this address, we have to start after it.
703          */
704         if (start == map->min_offset) {
705                 if ((entry = map->first_free) != &map->header)
706                         start = entry->end;
707         } else {
708                 vm_map_entry_t tmp;
709
710                 if (vm_map_lookup_entry(map, start, &tmp))
711                         start = tmp->end;
712                 entry = tmp;
713         }
714
715         /*
716          * Look through the rest of the map, trying to fit a new region in the
717          * gap between existing regions, or after the very last region.
718          */
719         for (;; start = (entry = next)->end) {
720                 /*
721                  * Adjust the proposed start by the requested alignment,
722                  * be sure that we didn't wrap the address.
723                  */
724                 if (align_mask == (vm_offset_t)-1)
725                         end = ((start + align - 1) / align) * align;
726                 else
727                         end = (start + align_mask) & ~align_mask;
728                 if (end < start)
729                         return (1);
730                 start = end;
731                 /*
732                  * Find the end of the proposed new region.  Be sure we didn't
733                  * go beyond the end of the map, or wrap around the address.
734                  * Then check to see if this is the last entry or if the 
735                  * proposed end fits in the gap between this and the next
736                  * entry.
737                  */
738                 end = start + length;
739                 if (end > map->max_offset || end < start)
740                         return (1);
741                 next = entry->next;
742                 if (next == &map->header || next->start >= end)
743                         break;
744         }
745         SAVE_HINT(map, entry);
746         *addr = start;
747         if (map == kernel_map) {
748                 vm_offset_t ksize;
749                 if ((ksize = round_page(start + length)) > kernel_vm_end) {
750                         pmap_growkernel(ksize);
751                 }
752         }
753         return (0);
754 }
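/*
 * Illustrative sketch (not in the original source): a caller wanting a
 * 16-page region aligned to a hypothetical 64KB boundary could use the new
 * alignment argument roughly as follows; the map must be locked around the
 * call exactly as in the unaligned case.
 *
 *      vm_offset_t addr;
 *
 *      vm_map_lock(map);
 *      if (vm_map_findspace(map, vm_map_min(map), 16 * PAGE_SIZE,
 *          64 * 1024, &addr) == 0) {
 *              (insert a mapping at the aligned address addr)
 *      }
 *      vm_map_unlock(map);
 *
 * Passing an align of 1 preserves the old behavior of returning an
 * arbitrarily aligned address.
 */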
755
756 /*
757  *      vm_map_find finds an unallocated region in the target address
758  *      map with the given length.  The search is defined to be
759  *      first-fit from the specified address; the region found is
760  *      returned in the same parameter.
761  *
762  *      If object is non-NULL, ref count must be bumped by caller
763  *      prior to making call to account for the new entry.
764  */
765 int
766 vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
767             vm_offset_t *addr,  /* IN/OUT */
768             vm_size_t length, boolean_t find_space, vm_prot_t prot,
769             vm_prot_t max, int cow)
770 {
771         vm_offset_t start;
772         int result, s = 0;
773
774         start = *addr;
775
776         if (map == kmem_map || map == mb_map)
777                 s = splvm();
778
779         vm_map_lock(map);
780         if (find_space) {
781                 if (vm_map_findspace(map, start, length, 1, addr)) {
782                         vm_map_unlock(map);
783                         if (map == kmem_map || map == mb_map)
784                                 splx(s);
785                         return (KERN_NO_SPACE);
786                 }
787                 start = *addr;
788         }
789         result = vm_map_insert(map, object, offset,
790                 start, start + length, prot, max, cow);
791         vm_map_unlock(map);
792
793         if (map == kmem_map || map == mb_map)
794                 splx(s);
795
796         return (result);
797 }
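/*
 * Illustrative sketch (not in the original source): a caller that lets
 * vm_map_find() pick the address passes find_space as TRUE and reads the
 * chosen address back through the same pointer:
 *
 *      vm_offset_t addr = vm_map_min(map);     (first-fit search start)
 *
 *      rv = vm_map_find(map, object, 0, &addr, size, TRUE,
 *              VM_PROT_ALL, VM_PROT_ALL, 0);
 *      (on KERN_SUCCESS, addr holds the start of the new mapping)
 */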
798
799 /*
800  *      vm_map_simplify_entry:
801  *
802  *      Simplify the given map entry by merging with either neighbor.  This
803  *      routine also has the ability to merge with both neighbors.
804  *
805  *      The map must be locked.
806  *
807  *      This routine guarantees that the passed entry remains valid (though
808  *      possibly extended).  When merging, this routine may delete one or
809  *      both neighbors.  No action is taken on entries which have their
810  *      in-transition flag set.
811  */
812 void
813 vm_map_simplify_entry(map, entry)
814         vm_map_t map;
815         vm_map_entry_t entry;
816 {
817         vm_map_entry_t next, prev;
818         vm_size_t prevsize, esize;
819
820         if (entry->eflags & (MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP)) {
821                 ++mycpu->gd_cnt.v_intrans_coll;
822                 return;
823         }
824
825         prev = entry->prev;
826         if (prev != &map->header) {
827                 prevsize = prev->end - prev->start;
828                 if ( (prev->end == entry->start) &&
829                      (prev->object.vm_object == entry->object.vm_object) &&
830                      (!prev->object.vm_object ||
831                         (prev->offset + prevsize == entry->offset)) &&
832                      (prev->eflags == entry->eflags) &&
833                      (prev->protection == entry->protection) &&
834                      (prev->max_protection == entry->max_protection) &&
835                      (prev->inheritance == entry->inheritance) &&
836                      (prev->wired_count == entry->wired_count)) {
837                         if (map->first_free == prev)
838                                 map->first_free = entry;
839                         if (map->hint == prev)
840                                 map->hint = entry;
841                         vm_map_entry_unlink(map, prev);
842                         entry->start = prev->start;
843                         entry->offset = prev->offset;
844                         if (prev->object.vm_object)
845                                 vm_object_deallocate(prev->object.vm_object);
846                         vm_map_entry_dispose(map, prev);
847                 }
848         }
849
850         next = entry->next;
851         if (next != &map->header) {
852                 esize = entry->end - entry->start;
853                 if ((entry->end == next->start) &&
854                     (next->object.vm_object == entry->object.vm_object) &&
855                      (!entry->object.vm_object ||
856                         (entry->offset + esize == next->offset)) &&
857                     (next->eflags == entry->eflags) &&
858                     (next->protection == entry->protection) &&
859                     (next->max_protection == entry->max_protection) &&
860                     (next->inheritance == entry->inheritance) &&
861                     (next->wired_count == entry->wired_count)) {
862                         if (map->first_free == next)
863                                 map->first_free = entry;
864                         if (map->hint == next)
865                                 map->hint = entry;
866                         vm_map_entry_unlink(map, next);
867                         entry->end = next->end;
868                         if (next->object.vm_object)
869                                 vm_object_deallocate(next->object.vm_object);
870                         vm_map_entry_dispose(map, next);
871                 }
872         }
873 }
874 /*
875  *      vm_map_clip_start:      [ internal use only ]
876  *
877  *      Asserts that the given entry begins at or after
878  *      the specified address; if necessary,
879  *      it splits the entry into two.
880  */
881 #define vm_map_clip_start(map, entry, startaddr) \
882 { \
883         if (startaddr > entry->start) \
884                 _vm_map_clip_start(map, entry, startaddr); \
885 }
886
887 /*
888  *      This routine is called only when it is known that
889  *      the entry must be split.
890  */
891 static void
892 _vm_map_clip_start(map, entry, start)
893         vm_map_t map;
894         vm_map_entry_t entry;
895         vm_offset_t start;
896 {
897         vm_map_entry_t new_entry;
898
899         /*
900          * Split off the front portion -- note that we must insert the new
901          * entry BEFORE this one, so that this entry has the specified
902          * starting address.
903          */
904
905         vm_map_simplify_entry(map, entry);
906
907         /*
908          * If there is no object backing this entry, we might as well create
909          * one now.  If we defer it, an object can get created after the map
910          * is clipped, and individual objects will be created for the split-up
911          * map.  This is a bit of a hack, but is also about the best place to
912          * put this improvement.
913          */
914
915         if (entry->object.vm_object == NULL && !map->system_map) {
916                 vm_object_t object;
917                 object = vm_object_allocate(OBJT_DEFAULT,
918                                 atop(entry->end - entry->start));
919                 entry->object.vm_object = object;
920                 entry->offset = 0;
921         }
922
923         new_entry = vm_map_entry_create(map);
924         *new_entry = *entry;
925
926         new_entry->end = start;
927         entry->offset += (start - entry->start);
928         entry->start = start;
929
930         vm_map_entry_link(map, entry->prev, new_entry);
931
932         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
933                 vm_object_reference(new_entry->object.vm_object);
934         }
935 }
936
937 /*
938  *      vm_map_clip_end:        [ internal use only ]
939  *
940  *      Asserts that the given entry ends at or before
941  *      the specified address; if necessary,
942  *      it splits the entry into two.
943  */
944
945 #define vm_map_clip_end(map, entry, endaddr) \
946 { \
947         if (endaddr < entry->end) \
948                 _vm_map_clip_end(map, entry, endaddr); \
949 }
950
951 /*
952  *      This routine is called only when it is known that
953  *      the entry must be split.
954  */
955 static void
956 _vm_map_clip_end(map, entry, end)
957         vm_map_t map;
958         vm_map_entry_t entry;
959         vm_offset_t end;
960 {
961         vm_map_entry_t new_entry;
962
963         /*
964          * If there is no object backing this entry, we might as well create
965          * one now.  If we defer it, an object can get created after the map
966          * is clipped, and individual objects will be created for the split-up
967          * map.  This is a bit of a hack, but is also about the best place to
968          * put this improvement.
969          */
970
971         if (entry->object.vm_object == NULL && !map->system_map) {
972                 vm_object_t object;
973                 object = vm_object_allocate(OBJT_DEFAULT,
974                                 atop(entry->end - entry->start));
975                 entry->object.vm_object = object;
976                 entry->offset = 0;
977         }
978
979         /*
980          * Create a new entry and insert it AFTER the specified entry
981          */
982
983         new_entry = vm_map_entry_create(map);
984         *new_entry = *entry;
985
986         new_entry->start = entry->end = end;
987         new_entry->offset += (end - entry->start);
988
989         vm_map_entry_link(map, entry, new_entry);
990
991         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
992                 vm_object_reference(new_entry->object.vm_object);
993         }
994 }
995
996 /*
997  *      VM_MAP_RANGE_CHECK:     [ internal use only ]
998  *
999  *      Asserts that the starting and ending region
1000  *      addresses fall within the valid range of the map.
1001  */
1002 #define VM_MAP_RANGE_CHECK(map, start, end)             \
1003                 {                                       \
1004                 if (start < vm_map_min(map))            \
1005                         start = vm_map_min(map);        \
1006                 if (end > vm_map_max(map))              \
1007                         end = vm_map_max(map);          \
1008                 if (start > end)                        \
1009                         start = end;                    \
1010                 }
1011
1012 /*
1013  *      vm_map_transition_wait: [ kernel use only ]
1014  *
1015  *      Used to block when an in-transition collision occurs.  The map
1016  *      is unlocked for the sleep and relocked before the return.
1017  */
1018 static
1019 void
1020 vm_map_transition_wait(vm_map_t map)
1021 {
1022         vm_map_unlock(map);
1023         tsleep(map, 0, "vment", 0);
1024         vm_map_lock(map);
1025 }
1026
1027 /*
1028  * CLIP_CHECK_BACK
1029  * CLIP_CHECK_FWD
1030  *
1031  *      When we do blocking operations with the map lock held it is
1032  *      possible that a clip might have occurred on our in-transit entry,
1033  *      requiring an adjustment to the entry in our loop.  These macros
1034  *      help the pageable and clip_range code deal with the case.  The
1035  *      conditional costs virtually nothing if no clipping has occurred.
1036  */
1037
1038 #define CLIP_CHECK_BACK(entry, save_start)              \
1039     do {                                                \
1040             while (entry->start != save_start) {        \
1041                     entry = entry->prev;                \
1042                     KASSERT(entry != &map->header, ("bad entry clip")); \
1043             }                                           \
1044     } while(0)
1045
1046 #define CLIP_CHECK_FWD(entry, save_end)                 \
1047     do {                                                \
1048             while (entry->end != save_end) {            \
1049                     entry = entry->next;                \
1050                     KASSERT(entry != &map->header, ("bad entry clip")); \
1051             }                                           \
1052     } while(0)
1053
1054
1055 /*
1056  *      vm_map_clip_range:      [ kernel use only ]
1057  *
1058  *      Clip the specified range and return the base entry.  The
1059  *      range may cover several entries starting at the returned base
1060  *      and the first and last entry in the covering sequence will be
1061  *      properly clipped to the requested start and end address.
1062  *
1063  *      If no holes are allowed you should pass the MAP_CLIP_NO_HOLES
1064  *      flag.  
1065  *
1066  *      The MAP_ENTRY_IN_TRANSITION flag will be set for the entries
1067  *      covered by the requested range.
1068  *
1069  *      The map must be exclusively locked on entry and will remain locked
1070  *      on return. If no range exists or the range contains holes and you
1071  *      specified that no holes were allowed, NULL will be returned.  This
1072  *      routine may temporarily unlock the map in order to avoid a deadlock when
1073  *      sleeping.
1074  */
1075 static
1076 vm_map_entry_t
1077 vm_map_clip_range(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags)
1078 {
1079         vm_map_entry_t start_entry;
1080         vm_map_entry_t entry;
1081
1082         /*
1083          * Locate the entry and effect initial clipping.  The in-transition
1084          * case does not occur very often so do not try to optimize it.
1085          */
1086 again:
1087         if (vm_map_lookup_entry(map, start, &start_entry) == FALSE)
1088                 return (NULL);
1089         entry = start_entry;
1090         if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
1091                 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
1092                 ++mycpu->gd_cnt.v_intrans_coll;
1093                 ++mycpu->gd_cnt.v_intrans_wait;
1094                 vm_map_transition_wait(map);
1095                 /*
1096                  * entry and/or start_entry may have been clipped while
1097                  * we slept, or may have gone away entirely.  We have
1098                  * to restart from the lookup.
1099                  */
1100                 goto again;
1101         }
1102         /*
1103          * Since we hold an exclusive map lock we do not have to restart
1104          * after clipping, even though clipping may block in zalloc.
1105          */
1106         vm_map_clip_start(map, entry, start);
1107         vm_map_clip_end(map, entry, end);
1108         entry->eflags |= MAP_ENTRY_IN_TRANSITION;
1109
1110         /*
1111          * Scan entries covered by the range.  When working on the next
1112          * entry a restart need only re-loop on the current entry which
1113          * we have already locked, since 'next' may have changed.  Also,
1114          * even though entry is safe, it may have been clipped so we
1115          * have to iterate forwards through the clip after sleeping.
1116          */
1117         while (entry->next != &map->header && entry->next->start < end) {
1118                 vm_map_entry_t next = entry->next;
1119
1120                 if (flags & MAP_CLIP_NO_HOLES) {
1121                         if (next->start > entry->end) {
1122                                 vm_map_unclip_range(map, start_entry,
1123                                         start, entry->end, flags);
1124                                 return(NULL);
1125                         }
1126                 }
1127
1128                 if (next->eflags & MAP_ENTRY_IN_TRANSITION) {
1129                         vm_offset_t save_end = entry->end;
1130                         next->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
1131                         ++mycpu->gd_cnt.v_intrans_coll;
1132                         ++mycpu->gd_cnt.v_intrans_wait;
1133                         vm_map_transition_wait(map);
1134
1135                         /*
1136                          * clips might have occurred while we blocked.
1137                          */
1138                         CLIP_CHECK_FWD(entry, save_end);
1139                         CLIP_CHECK_BACK(start_entry, start);
1140                         continue;
1141                 }
1142                 /*
1143                  * No restart necessary even though clip_end may block, we
1144                  * are holding the map lock.
1145                  */
1146                 vm_map_clip_end(map, next, end);
1147                 next->eflags |= MAP_ENTRY_IN_TRANSITION;
1148                 entry = next;
1149         }
1150         if (flags & MAP_CLIP_NO_HOLES) {
1151                 if (entry->end != end) {
1152                         vm_map_unclip_range(map, start_entry,
1153                                 start, entry->end, flags);
1154                         return(NULL);
1155                 }
1156         }
1157         return(start_entry);
1158 }
1159
1160 /*
1161  *      vm_map_unclip_range:    [ kernel use only ]
1162  *
1163  *      Undo the effect of vm_map_clip_range().  You should pass the same
1164  *      flags and the same range that you passed to vm_map_clip_range().
1165  *      This code will clear the in-transition flag on the entries and
1166  *      wake up anyone waiting.  This code will also simplify the sequence 
1167  *      and attempt to merge it with entries before and after the sequence.
1168  *
1169  *      The map must be locked on entry and will remain locked on return.
1170  *
1171  *      Note that you should also pass the start_entry returned by 
1172  *      vm_map_clip_range().  However, if you block between the two calls
1173  *      with the map unlocked please be aware that the start_entry may
1174  *      have been clipped and you may need to scan it backwards to find
1175  *      the entry corresponding with the original start address.  You are
1176  *      responsible for this, vm_map_unclip_range() expects the correct
1177  *      start_entry to be passed to it and will KASSERT otherwise.
1178  */
1179 static
1180 void
1181 vm_map_unclip_range(
1182         vm_map_t map,
1183         vm_map_entry_t start_entry,
1184         vm_offset_t start,
1185         vm_offset_t end,
1186         int flags)
1187 {
1188         vm_map_entry_t entry;
1189
1190         entry = start_entry;
1191
1192         KASSERT(entry->start == start, ("unclip_range: illegal base entry"));
1193         while (entry != &map->header && entry->start < end) {
1194                 KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION, ("in-transition flag not set during unclip on: %p", entry));
1195                 KASSERT(entry->end <= end, ("unclip_range: tail wasn't clipped"));
1196                 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
1197                 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
1198                         entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
1199                         wakeup(map);
1200                 }
1201                 entry = entry->next;
1202         }
1203
1204         /*
1205          * Simplification does not block so there is no restart case.
1206          */
1207         entry = start_entry;
1208         while (entry != &map->header && entry->start < end) {
1209                 vm_map_simplify_entry(map, entry);
1210                 entry = entry->next;
1211         }
1212 }
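/*
 * Illustrative sketch (not in the original source): the typical pairing of
 * vm_map_clip_range() and vm_map_unclip_range() described above looks
 * roughly like this, where do_something() stands in for per-entry work:
 *
 *      entry = vm_map_clip_range(map, start, end, MAP_CLIP_NO_HOLES);
 *      if (entry == NULL)
 *              return (KERN_INVALID_ADDRESS);
 *      for (cur = entry; cur != &map->header && cur->start < end;
 *           cur = cur->next) {
 *              do_something(map, cur);
 *      }
 *      vm_map_unclip_range(map, entry, start, end, MAP_CLIP_NO_HOLES);
 *
 * If the per-entry work blocks with the map unlocked, the caller must
 * re-locate the base entry (see CLIP_CHECK_BACK) before unclipping.
 */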
1213
1214 /*
1215  *      vm_map_submap:          [ kernel use only ]
1216  *
1217  *      Mark the given range as handled by a subordinate map.
1218  *
1219  *      This range must have been created with vm_map_find,
1220  *      and no other operations may have been performed on this
1221  *      range prior to calling vm_map_submap.
1222  *
1223  *      Only a limited number of operations can be performed
1224  *      within this range after calling vm_map_submap:
1225  *              vm_fault
1226  *      [Don't try vm_map_copy!]
1227  *
1228  *      To remove a submapping, one must first remove the
1229  *      range from the superior map, and then destroy the
1230  *      submap (if desired).  [Better yet, don't try it.]
1231  */
1232 int
1233 vm_map_submap(map, start, end, submap)
1234         vm_map_t map;
1235         vm_offset_t start;
1236         vm_offset_t end;
1237         vm_map_t submap;
1238 {
1239         vm_map_entry_t entry;
1240         int result = KERN_INVALID_ARGUMENT;
1241
1242         vm_map_lock(map);
1243
1244         VM_MAP_RANGE_CHECK(map, start, end);
1245
1246         if (vm_map_lookup_entry(map, start, &entry)) {
1247                 vm_map_clip_start(map, entry, start);
1248         } else {
1249                 entry = entry->next;
1250         }
1251
1252         vm_map_clip_end(map, entry, end);
1253
1254         if ((entry->start == start) && (entry->end == end) &&
1255             ((entry->eflags & MAP_ENTRY_COW) == 0) &&
1256             (entry->object.vm_object == NULL)) {
1257                 entry->object.sub_map = submap;
1258                 entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
1259                 result = KERN_SUCCESS;
1260         }
1261         vm_map_unlock(map);
1262
1263         return (result);
1264 }
1265
1266 /*
1267  *      vm_map_protect:
1268  *
1269  *      Sets the protection of the specified address
1270  *      region in the target map.  If "set_max" is
1271  *      specified, the maximum protection is to be set;
1272  *      otherwise, only the current protection is affected.
1273  */
1274 int
1275 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
1276                vm_prot_t new_prot, boolean_t set_max)
1277 {
1278         vm_map_entry_t current;
1279         vm_map_entry_t entry;
1280
1281         vm_map_lock(map);
1282
1283         VM_MAP_RANGE_CHECK(map, start, end);
1284
1285         if (vm_map_lookup_entry(map, start, &entry)) {
1286                 vm_map_clip_start(map, entry, start);
1287         } else {
1288                 entry = entry->next;
1289         }
1290
1291         /*
1292          * Make a first pass to check for protection violations.
1293          */
1294
1295         current = entry;
1296         while ((current != &map->header) && (current->start < end)) {
1297                 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
1298                         vm_map_unlock(map);
1299                         return (KERN_INVALID_ARGUMENT);
1300                 }
1301                 if ((new_prot & current->max_protection) != new_prot) {
1302                         vm_map_unlock(map);
1303                         return (KERN_PROTECTION_FAILURE);
1304                 }
1305                 current = current->next;
1306         }
1307
1308         /*
1309          * Go back and fix up protections. [Note that clipping is not
1310          * necessary the second time.]
1311          */
1312
1313         current = entry;
1314
1315         while ((current != &map->header) && (current->start < end)) {
1316                 vm_prot_t old_prot;
1317
1318                 vm_map_clip_end(map, current, end);
1319
1320                 old_prot = current->protection;
1321                 if (set_max)
1322                         current->protection =
1323                             (current->max_protection = new_prot) &
1324                             old_prot;
1325                 else
1326                         current->protection = new_prot;
1327
1328                 /*
1329                  * Update physical map if necessary. Worry about copy-on-write
1330                  * here -- CHECK THIS XXX
1331                  */
1332
1333                 if (current->protection != old_prot) {
1334 #define MASK(entry)     (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
1335                                                         VM_PROT_ALL)
1336
1337                         pmap_protect(map->pmap, current->start,
1338                             current->end,
1339                             current->protection & MASK(current));
1340 #undef  MASK
1341                 }
1342
1343                 vm_map_simplify_entry(map, current);
1344
1345                 current = current->next;
1346         }
1347
1348         vm_map_unlock(map);
1349         return (KERN_SUCCESS);
1350 }
1351
1352 /*
1353  *      vm_map_madvise:
1354  *
1355  *      This routine traverses a process's map handling the madvise
1356  *      system call.  Advisories are classified as either those affecting
1357  *      the vm_map_entry structure, or those affecting the underlying
1358  *      objects.
1359  */
1360
1361 int
1362 vm_map_madvise(map, start, end, behav)
1363         vm_map_t map;
1364         vm_offset_t start, end;
1365         int behav;
1366 {
1367         vm_map_entry_t current, entry;
1368         int modify_map = 0;
1369
1370         /*
1371          * Some madvise calls directly modify the vm_map_entry, in which case
1372          * we need to use an exclusive lock on the map and we need to perform 
1373          * various clipping operations.  Otherwise we only need a read-lock
1374          * on the map.
1375          */
1376
1377         switch(behav) {
1378         case MADV_NORMAL:
1379         case MADV_SEQUENTIAL:
1380         case MADV_RANDOM:
1381         case MADV_NOSYNC:
1382         case MADV_AUTOSYNC:
1383         case MADV_NOCORE:
1384         case MADV_CORE:
1385                 modify_map = 1;
1386                 vm_map_lock(map);
1387                 break;
1388         case MADV_WILLNEED:
1389         case MADV_DONTNEED:
1390         case MADV_FREE:
1391                 vm_map_lock_read(map);
1392                 break;
1393         default:
1394                 return (KERN_INVALID_ARGUMENT);
1395         }
1396
1397         /*
1398          * Locate starting entry and clip if necessary.
1399          */
1400
1401         VM_MAP_RANGE_CHECK(map, start, end);
1402
1403         if (vm_map_lookup_entry(map, start, &entry)) {
1404                 if (modify_map)
1405                         vm_map_clip_start(map, entry, start);
1406         } else {
1407                 entry = entry->next;
1408         }
1409
1410         if (modify_map) {
1411                 /*
1412                  * madvise behaviors that are implemented in the vm_map_entry.
1413                  *
1414                  * We clip the vm_map_entry so that behavioral changes are
1415                  * limited to the specified address range.
1416                  */
1417                 for (current = entry;
1418                      (current != &map->header) && (current->start < end);
1419                      current = current->next
1420                 ) {
1421                         if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
1422                                 continue;
1423
1424                         vm_map_clip_end(map, current, end);
1425
1426                         switch (behav) {
1427                         case MADV_NORMAL:
1428                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
1429                                 break;
1430                         case MADV_SEQUENTIAL:
1431                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
1432                                 break;
1433                         case MADV_RANDOM:
1434                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
1435                                 break;
1436                         case MADV_NOSYNC:
1437                                 current->eflags |= MAP_ENTRY_NOSYNC;
1438                                 break;
1439                         case MADV_AUTOSYNC:
1440                                 current->eflags &= ~MAP_ENTRY_NOSYNC;
1441                                 break;
1442                         case MADV_NOCORE:
1443                                 current->eflags |= MAP_ENTRY_NOCOREDUMP;
1444                                 break;
1445                         case MADV_CORE:
1446                                 current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
1447                                 break;
1448                         default:
1449                                 break;
1450                         }
1451                         vm_map_simplify_entry(map, current);
1452                 }
1453                 vm_map_unlock(map);
1454         } else {
1455                 vm_pindex_t pindex;
1456                 int count;
1457
1458                 /*
1459                  * madvise behaviors that are implemented in the underlying
1460                  * vm_object.
1461                  *
1462                  * Since we don't clip the vm_map_entry, we have to clip
1463                  * the vm_object pindex and count.
1464                  */
1465                 for (current = entry;
1466                      (current != &map->header) && (current->start < end);
1467                      current = current->next
1468                 ) {
1469                         vm_offset_t useStart;
1470
1471                         if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
1472                                 continue;
1473
1474                         pindex = OFF_TO_IDX(current->offset);
1475                         count = atop(current->end - current->start);
1476                         useStart = current->start;
1477
1478                         if (current->start < start) {
1479                                 pindex += atop(start - current->start);
1480                                 count -= atop(start - current->start);
1481                                 useStart = start;
1482                         }
1483                         if (current->end > end)
1484                                 count -= atop(current->end - end);
1485
1486                         if (count <= 0)
1487                                 continue;
1488
1489                         vm_object_madvise(current->object.vm_object,
1490                                           pindex, count, behav);
1491                         if (behav == MADV_WILLNEED) {
1492                                 pmap_object_init_pt(
1493                                     map->pmap, 
1494                                     useStart,
1495                                     current->object.vm_object,
1496                                     pindex, 
1497                                     (count << PAGE_SHIFT),
1498                                     MAP_PREFAULT_MADVISE
1499                                 );
1500                         }
1501                 }
1502                 vm_map_unlock_read(map);
1503         }
1504         return(0);
1505 }       
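
/*
 * Example (illustrative only, not part of the original source): a hedged
 * sketch of an madvise(2)-style caller, assuming the
 * vm_map_madvise(map, start, end, behav) interface implemented above.
 * MADV_NORMAL/SEQUENTIAL/RANDOM/NOSYNC/AUTOSYNC/NOCORE/CORE are applied
 * to the map entries; MADV_WILLNEED/DONTNEED/FREE are forwarded to the
 * backing objects.  The wrapper name is hypothetical.
 */
#if 0
static int
example_madvise(struct proc *p, vm_offset_t addr, vm_size_t len, int behav)
{
	vm_map_t map = &p->p_vmspace->vm_map;

	return (vm_map_madvise(map, trunc_page(addr),
	    round_page(addr + len), behav));
}
#endif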
1506
1507
1508 /*
1509  *      vm_map_inherit:
1510  *
1511  *      Sets the inheritance of the specified address
1512  *      range in the target map.  Inheritance
1513  *      affects how the map will be shared with
1514  *      child maps at the time of vm_map_fork.
1515  */
1516 int
1517 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
1518                vm_inherit_t new_inheritance)
1519 {
1520         vm_map_entry_t entry;
1521         vm_map_entry_t temp_entry;
1522
1523         switch (new_inheritance) {
1524         case VM_INHERIT_NONE:
1525         case VM_INHERIT_COPY:
1526         case VM_INHERIT_SHARE:
1527                 break;
1528         default:
1529                 return (KERN_INVALID_ARGUMENT);
1530         }
1531
1532         vm_map_lock(map);
1533
1534         VM_MAP_RANGE_CHECK(map, start, end);
1535
1536         if (vm_map_lookup_entry(map, start, &temp_entry)) {
1537                 entry = temp_entry;
1538                 vm_map_clip_start(map, entry, start);
1539         } else
1540                 entry = temp_entry->next;
1541
1542         while ((entry != &map->header) && (entry->start < end)) {
1543                 vm_map_clip_end(map, entry, end);
1544
1545                 entry->inheritance = new_inheritance;
1546
1547                 vm_map_simplify_entry(map, entry);
1548
1549                 entry = entry->next;
1550         }
1551
1552         vm_map_unlock(map);
1553         return (KERN_SUCCESS);
1554 }
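
/*
 * Example (illustrative only, not part of the original source): a hedged
 * sketch of how a minherit(2)-style caller might invoke vm_map_inherit()
 * on a page-aligned range of the current process.  The wrapper name and
 * error mapping are hypothetical.
 */
#if 0
static int
example_inherit_share(vm_offset_t addr, vm_size_t len)
{
	vm_map_t map = &curproc->p_vmspace->vm_map;
	vm_offset_t start = trunc_page(addr);
	vm_offset_t end = round_page(addr + len);

	/* vm_map_inherit() validates the inheritance value itself. */
	if (vm_map_inherit(map, start, end, VM_INHERIT_SHARE) != KERN_SUCCESS)
		return (EINVAL);
	return (0);
}
#endif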
1555
1556 /*
1557  * Implement the semantics of mlock
1558  */
1559 int
1560 vm_map_user_pageable(map, start, real_end, new_pageable)
1561         vm_map_t map;
1562         vm_offset_t start;
1563         vm_offset_t real_end;
1564         boolean_t new_pageable;
1565 {
1566         vm_map_entry_t entry;
1567         vm_map_entry_t start_entry;
1568         vm_offset_t end;
1569         int rv = KERN_SUCCESS;
1570
1571         vm_map_lock(map);
1572         VM_MAP_RANGE_CHECK(map, start, real_end);
1573         end = real_end;
1574
1575         start_entry = vm_map_clip_range(map, start, end, MAP_CLIP_NO_HOLES);
1576         if (start_entry == NULL) {
1577                 vm_map_unlock(map);
1578                 return (KERN_INVALID_ADDRESS);
1579         }
1580
1581         if (new_pageable == 0) {
1582                 entry = start_entry;
1583                 while ((entry != &map->header) && (entry->start < end)) {
1584                         vm_offset_t save_start;
1585                         vm_offset_t save_end;
1586
1587                         /*
1588                          * Already user wired or hard wired (trivial cases)
1589                          */
1590                         if (entry->eflags & MAP_ENTRY_USER_WIRED) {
1591                                 entry = entry->next;
1592                                 continue;
1593                         }
1594                         if (entry->wired_count != 0) {
1595                                 entry->wired_count++;
1596                                 entry->eflags |= MAP_ENTRY_USER_WIRED;
1597                                 entry = entry->next;
1598                                 continue;
1599                         }
1600
1601                         /*
1602                          * A new wiring requires instantiation of appropriate
1603                          * management structures and the faulting in of the
1604                          * page.
1605                          */
1606                         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1607                                 int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
1608                                 if (copyflag && ((entry->protection & VM_PROT_WRITE) != 0)) {
1609
1610                                         vm_object_shadow(&entry->object.vm_object,
1611                                             &entry->offset,
1612                                             atop(entry->end - entry->start));
1613                                         entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
1614
1615                                 } else if (entry->object.vm_object == NULL &&
1616                                            !map->system_map) {
1617
1618                                         entry->object.vm_object =
1619                                             vm_object_allocate(OBJT_DEFAULT,
1620                                                 atop(entry->end - entry->start));
1621                                         entry->offset = (vm_offset_t) 0;
1622
1623                                 }
1624                         }
1625                         entry->wired_count++;
1626                         entry->eflags |= MAP_ENTRY_USER_WIRED;
1627
1628                         /*
1629                          * Now fault in the area.  The map lock needs to be
1630                          * manipulated to avoid deadlocks.  The in-transition
1631                          * flag protects the entries. 
1632                          */
1633                         save_start = entry->start;
1634                         save_end = entry->end;
1635                         vm_map_unlock(map);
1636                         map->timestamp++;
1637                         rv = vm_fault_user_wire(map, save_start, save_end);
1638                         vm_map_lock(map);
1639                         if (rv) {
1640                                 CLIP_CHECK_BACK(entry, save_start);
1641                                 for (;;) {
1642                                         KASSERT(entry->wired_count == 1, ("bad wired_count on entry"));
1643                                         entry->eflags &= ~MAP_ENTRY_USER_WIRED;
1644                                         entry->wired_count = 0;
1645                                         if (entry->end == save_end)
1646                                                 break;
1647                                         entry = entry->next;
1648                                         KASSERT(entry != &map->header, ("bad entry clip during backout"));
1649                                 }
1650                                 end = save_start;       /* unwire the rest */
1651                                 break;
1652                         }
1653                         /*
1654                          * note that even though the entry might have been
1655                          * clipped, the USER_WIRED flag we set prevents
1656                          * duplication so we do not have to do a 
1657                          * clip check.
1658                          */
1659                         entry = entry->next;
1660                 }
1661
1662                 /*
1663                  * If we failed fall through to the unwiring section to
1664                  * unwire what we had wired so far.  'end' has already
1665                  * been adjusted.
1666                  */
1667                 if (rv)
1668                         new_pageable = 1;
1669
1670                 /*
1671                  * start_entry might have been clipped if we unlocked the
1672                  * map and blocked.  No matter how clipped it has gotten
1673                  * there should be a fragment that is on our start boundary.
1674                  */
1675                 CLIP_CHECK_BACK(start_entry, start);
1676         }
1677
1678         /*
1679          * Deal with the unwiring case.
1680          */
1681         if (new_pageable) {
1682                 /*
1683                  * This is the unwiring case.  We must first ensure that the
1684                  * range to be unwired is really wired down.  We know there
1685                  * are no holes.
1686                  */
1687                 entry = start_entry;
1688                 while ((entry != &map->header) && (entry->start < end)) {
1689                         if ((entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
1690                                 rv = KERN_INVALID_ARGUMENT;
1691                                 goto done;
1692                         }
1693                         KASSERT(entry->wired_count != 0, ("wired count was 0 with USER_WIRED set! %p", entry));
1694                         entry = entry->next;
1695                 }
1696
1697                 /*
1698                  * Now decrement the wiring count for each region. If a region
1699                  * becomes completely unwired, unwire its physical pages and
1700                  * mappings.
1701                  */
                entry = start_entry;
1702                 while ((entry != &map->header) && (entry->start < end)) {
1703                         KASSERT(entry->eflags & MAP_ENTRY_USER_WIRED, ("expected USER_WIRED on entry %p", entry));
1704                         entry->eflags &= ~MAP_ENTRY_USER_WIRED;
1705                         entry->wired_count--;
1706                         if (entry->wired_count == 0)
1707                                 vm_fault_unwire(map, entry->start, entry->end);
1708                         entry = entry->next;
1709                 }
1710         }
1711 done:
1712         vm_map_unclip_range(map, start_entry, start, real_end, 
1713                 MAP_CLIP_NO_HOLES);
1714         map->timestamp++;
1715         vm_map_unlock(map);
1716         return (rv);
1717 }
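
/*
 * Example (illustrative only, not part of the original source): a hedged
 * sketch of an mlock(2)-style wrapper around vm_map_user_pageable().
 * new_pageable == FALSE wires the range; TRUE unwires it.  The wrapper
 * name is hypothetical and the resource-limit checks normally done by
 * the syscall are omitted.
 */
#if 0
static int
example_user_wire(struct proc *p, vm_offset_t addr, vm_size_t len,
    boolean_t wire)
{
	vm_map_t map = &p->p_vmspace->vm_map;
	vm_offset_t start = trunc_page(addr);
	vm_offset_t end = round_page(addr + len);

	return (vm_map_user_pageable(map, start, end, wire ? FALSE : TRUE));
}
#endif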
1718
1719 /*
1720  *      vm_map_pageable:
1721  *
1722  *      Sets the pageability of the specified address
1723  *      range in the target map.  Regions specified
1724  *      as not pageable require locked-down physical
1725  *      memory and physical page maps.
1726  *
1727  *      The map must not be locked, but a reference
1728  *      must remain to the map throughout the call.
1729  */
1730 int
1731 vm_map_pageable(map, start, real_end, new_pageable)
1732         vm_map_t map;
1733         vm_offset_t start;
1734         vm_offset_t real_end;
1735         boolean_t new_pageable;
1736 {
1737         vm_map_entry_t entry;
1738         vm_map_entry_t start_entry;
1739         vm_offset_t end;
1740         int rv = KERN_SUCCESS;
1741         int s;
1742
1743         vm_map_lock(map);
1744         VM_MAP_RANGE_CHECK(map, start, real_end);
1745         end = real_end;
1746
1747         start_entry = vm_map_clip_range(map, start, end, MAP_CLIP_NO_HOLES);
1748         if (start_entry == NULL) {
1749                 vm_map_unlock(map);
1750                 return (KERN_INVALID_ADDRESS);
1751         }
1752         if (new_pageable == 0) {
1753                 /*
1754                  * Wiring.  
1755                  *
1756                  * 1.  Holding the write lock, we create any shadow or zero-fill
1757                  * objects that need to be created. Then we clip each map
1758                  * entry to the region to be wired and increment its wiring
1759                  * count.  We create objects before clipping the map entries
1760                  * to avoid object proliferation.
1761                  *
1762                  * 2.  We downgrade to a read lock, and call vm_fault_wire to
1763                  * fault in the pages for any newly wired area (wired_count is
1764                  * 1).
1765                  *
1766                  * Downgrading to a read lock for vm_fault_wire avoids a 
1767                  * possible deadlock with another process that may have faulted
1768                  * on one of the pages to be wired (it would mark the page busy,
1769                  * blocking us, then in turn block on the map lock that we
1770                  * hold).  Because of problems in the recursive lock package,
1771                  * we cannot upgrade to a write lock in vm_map_lookup.  Thus,
1772                  * any actions that require the write lock must be done
1773                  * beforehand.  Because we keep the read lock on the map, the
1774                  * copy-on-write status of the entries we modify here cannot
1775                  * change.
1776                  */
1777
1778                 entry = start_entry;
1779                 while ((entry != &map->header) && (entry->start < end)) {
1780                         /*
1781                          * Trivial case if the entry is already wired
1782                          */
1783                         if (entry->wired_count) {
1784                                 entry->wired_count++;
1785                                 entry = entry->next;
1786                                 continue;
1787                         }
1788
1789                         /*
1790                          * The entry is being newly wired, we have to setup
1791                          * appropriate management structures.  A shadow 
1792                          * object is required for a copy-on-write region,
1793                          * or a normal object for a zero-fill region.  We
1794                          * do not have to do this for entries that point to sub
1795                          * maps because we won't hold the lock on the sub map.
1796                          */
1797                         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1798                                 int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
1799                                 if (copyflag &&
1800                                     ((entry->protection & VM_PROT_WRITE) != 0)) {
1801
1802                                         vm_object_shadow(&entry->object.vm_object,
1803                                             &entry->offset,
1804                                             atop(entry->end - entry->start));
1805                                         entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
1806                                 } else if (entry->object.vm_object == NULL &&
1807                                            !map->system_map) {
1808                                         entry->object.vm_object =
1809                                             vm_object_allocate(OBJT_DEFAULT,
1810                                                 atop(entry->end - entry->start));
1811                                         entry->offset = (vm_offset_t) 0;
1812                                 }
1813                         }
1814
1815                         entry->wired_count++;
1816                         entry = entry->next;
1817                 }
1818
1819                 /*
1820                  * Pass 2.
1821                  */
1822
1823                 /*
1824                  * HACK HACK HACK HACK
1825                  *
1826                  * Unlock the map to avoid deadlocks.  The in-transit flag
1827                  * protects us from most changes but note that
1828                  * clipping may still occur.  To prevent clipping from
1829                  * occurring after the unlock, except for when we are
1830                  * blocking in vm_fault_wire, we must run at splvm().
1831                  * Otherwise our accesses to entry->start and entry->end
1832                  * could be corrupted.  We have to set splvm() prior to
1833                  * unlocking so start_entry does not change out from
1834                  * under us at the very beginning of the loop.
1835                  *
1836                  * HACK HACK HACK HACK
1837                  */
1838
1839                 s = splvm();
1840                 vm_map_unlock(map);
1841
1842                 entry = start_entry;
1843                 while (entry != &map->header && entry->start < end) {
1844                         /*
1845                          * If vm_fault_wire fails for any page we need to undo
1846                          * what has been done.  We decrement the wiring count
1847                          * for those pages which have not yet been wired (now)
1848                          * and unwire those that have (later).
1849                          */
1850                         vm_offset_t save_start = entry->start;
1851                         vm_offset_t save_end = entry->end;
1852
1853                         if (entry->wired_count == 1)
1854                                 rv = vm_fault_wire(map, entry->start, entry->end);
1855                         if (rv) {
1856                                 CLIP_CHECK_BACK(entry, save_start);
1857                                 for (;;) {
1858                                         KASSERT(entry->wired_count == 1, ("wired_count changed unexpectedly"));
1859                                         entry->wired_count = 0;
1860                                         if (entry->end == save_end)
1861                                                 break;
1862                                         entry = entry->next;
1863                                         KASSERT(entry != &map->header, ("bad entry clip during backout"));
1864                                 }
1865                                 end = save_start;
1866                                 break;
1867                         }
1868                         CLIP_CHECK_FWD(entry, save_end);
1869                         entry = entry->next;
1870                 }
1871                 splx(s);
1872
1873                 /*
1874                  * relock.  start_entry is still IN_TRANSITION and must
1875                  * still exist, but may have been clipped (handled just
1876                  * below).
1877                  */
1878                 vm_map_lock(map);
1879
1880                 /*
1881                  * If a failure occurred, undo everything by falling through
1882                  * to the unwiring code.  'end' has already been adjusted
1883                  * appropriately.
1884                  */
1885                 if (rv)
1886                         new_pageable = 1;
1887
1888                 /*
1889                  * start_entry might have been clipped if we unlocked the
1890                  * map and blocked.  No matter how clipped it has gotten
1891                  * there should be a fragment that is on our start boundary.
1892                  */
1893                 CLIP_CHECK_BACK(start_entry, start);
1894         }
1895
1896         if (new_pageable) {
1897                 /*
1898                  * This is the unwiring case.  We must first ensure that the
1899                  * range to be unwired is really wired down.  We know there
1900                  * are no holes.
1901                  */
1902                 entry = start_entry;
1903                 while ((entry != &map->header) && (entry->start < end)) {
1904                         if (entry->wired_count == 0) {
1905                                 rv = KERN_INVALID_ARGUMENT;
1906                                 goto done;
1907                         }
1908                         entry = entry->next;
1909                 }
1910
1911                 /*
1912                  * Now decrement the wiring count for each region. If a region
1913                  * becomes completely unwired, unwire its physical pages and
1914                  * mappings.
1915                  */
1916                 entry = start_entry;
1917                 while ((entry != &map->header) && (entry->start < end)) {
1918                         entry->wired_count--;
1919                         if (entry->wired_count == 0)
1920                                 vm_fault_unwire(map, entry->start, entry->end);
1921                         entry = entry->next;
1922                 }
1923         }
1924 done:
1925         vm_map_unclip_range(map, start_entry, start, real_end, 
1926                 MAP_CLIP_NO_HOLES);
1927         map->timestamp++;
1928         vm_map_unlock(map);
1929         return (rv);
1930 }
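
/*
 * Example (illustrative only, not part of the original source): a hedged
 * sketch of wiring a range of the kernel map with vm_map_pageable(), in
 * the style of kernel code that must keep a buffer resident.  The helper
 * name is hypothetical.
 */
#if 0
static int
example_kernel_wire(vm_offset_t start, vm_offset_t end)
{
	/* FALSE means "not pageable", i.e. wire the pages down. */
	return (vm_map_pageable(kernel_map, start, end, FALSE));
}
#endif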
1931
1932 /*
1933  * vm_map_clean
1934  *
1935  * Push any dirty cached pages in the address range to their pager.
1936  * If syncio is TRUE, dirty pages are written synchronously.
1937  * If invalidate is TRUE, any cached pages are freed as well.
1938  *
1939  * Returns an error if any part of the specified range is not mapped.
1940  */
1941 int
1942 vm_map_clean(map, start, end, syncio, invalidate)
1943         vm_map_t map;
1944         vm_offset_t start;
1945         vm_offset_t end;
1946         boolean_t syncio;
1947         boolean_t invalidate;
1948 {
1949         vm_map_entry_t current;
1950         vm_map_entry_t entry;
1951         vm_size_t size;
1952         vm_object_t object;
1953         vm_ooffset_t offset;
1954
1955         vm_map_lock_read(map);
1956         VM_MAP_RANGE_CHECK(map, start, end);
1957         if (!vm_map_lookup_entry(map, start, &entry)) {
1958                 vm_map_unlock_read(map);
1959                 return (KERN_INVALID_ADDRESS);
1960         }
1961         /*
1962          * Make a first pass to check for holes.
1963          */
1964         for (current = entry; current->start < end; current = current->next) {
1965                 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
1966                         vm_map_unlock_read(map);
1967                         return (KERN_INVALID_ARGUMENT);
1968                 }
1969                 if (end > current->end &&
1970                     (current->next == &map->header ||
1971                         current->end != current->next->start)) {
1972                         vm_map_unlock_read(map);
1973                         return (KERN_INVALID_ADDRESS);
1974                 }
1975         }
1976
1977         if (invalidate)
1978                 pmap_remove(vm_map_pmap(map), start, end);
1979         /*
1980          * Make a second pass, cleaning/uncaching pages from the indicated
1981          * objects as we go.
1982          */
1983         for (current = entry; current->start < end; current = current->next) {
1984                 offset = current->offset + (start - current->start);
1985                 size = (end <= current->end ? end : current->end) - start;
1986                 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
1987                         vm_map_t smap;
1988                         vm_map_entry_t tentry;
1989                         vm_size_t tsize;
1990
1991                         smap = current->object.sub_map;
1992                         vm_map_lock_read(smap);
1993                         (void) vm_map_lookup_entry(smap, offset, &tentry);
1994                         tsize = tentry->end - offset;
1995                         if (tsize < size)
1996                                 size = tsize;
1997                         object = tentry->object.vm_object;
1998                         offset = tentry->offset + (offset - tentry->start);
1999                         vm_map_unlock_read(smap);
2000                 } else {
2001                         object = current->object.vm_object;
2002                 }
2003                 /*
2004                  * Note that there is absolutely no sense in writing out
2005                  * anonymous objects, so we track down the vnode object
2006                  * to write out.
2007                  * We invalidate (remove) all pages from the address space
2008                  * anyway, for semantic correctness.
2009                  *
2010                  * note: certain anonymous maps, such as MAP_NOSYNC maps,
2011                  * may start out with a NULL object.
2012                  */
2013                 while (object && object->backing_object) {
2014                         object = object->backing_object;
2015                         offset += object->backing_object_offset;
2016                         if (object->size < OFF_TO_IDX( offset + size))
2017                                 size = IDX_TO_OFF(object->size) - offset;
2018                 }
2019                 if (object && (object->type == OBJT_VNODE) && 
2020                     (current->protection & VM_PROT_WRITE)) {
2021                         /*
2022                          * Flush pages if writing is allowed, invalidate them
2023                          * if invalidation requested.  Pages undergoing I/O
2024                          * will be ignored by vm_object_page_remove().
2025                          *
2026                          * We cannot lock the vnode and then wait for paging
2027                          * to complete without deadlocking against vm_fault.
2028                          * Instead we simply call vm_object_page_remove() and
2029                          * allow it to block internally on a page-by-page 
2030                          * basis when it encounters pages undergoing async 
2031                          * I/O.
2032                          */
2033                         int flags;
2034
2035                         vm_object_reference(object);
2036                         vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY, curthread);
2037                         flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
2038                         flags |= invalidate ? OBJPC_INVAL : 0;
2039                         vm_object_page_clean(object,
2040                             OFF_TO_IDX(offset),
2041                             OFF_TO_IDX(offset + size + PAGE_MASK),
2042                             flags);
2043                         VOP_UNLOCK(object->handle, 0, curthread);
2044                         vm_object_deallocate(object);
2045                 }
2046                 if (object && invalidate &&
2047                    ((object->type == OBJT_VNODE) ||
2048                     (object->type == OBJT_DEVICE))) {
2049                         vm_object_reference(object);
2050                         vm_object_page_remove(object,
2051                             OFF_TO_IDX(offset),
2052                             OFF_TO_IDX(offset + size + PAGE_MASK),
2053                             FALSE);
2054                         vm_object_deallocate(object);
2055                 }
2056                 start += size;
2057         }
2058
2059         vm_map_unlock_read(map);
2060         return (KERN_SUCCESS);
2061 }
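
/*
 * Example (illustrative only, not part of the original source): a hedged
 * sketch of an msync(2)-style caller of vm_map_clean().  Synchronous
 * writes map to syncio, invalidation of cached pages maps to invalidate;
 * the wrapper name is hypothetical.
 */
#if 0
static int
example_sync_range(vm_map_t map, vm_offset_t addr, vm_size_t len,
    boolean_t syncio, boolean_t invalidate)
{
	vm_offset_t start = trunc_page(addr);
	vm_offset_t end = round_page(addr + len);

	return (vm_map_clean(map, start, end, syncio, invalidate));
}
#endif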
2062
2063 /*
2064  *      vm_map_entry_unwire:    [ internal use only ]
2065  *
2066  *      Make the region specified by this entry pageable.
2067  *
2068  *      The map in question should be locked.
2069  *      [This is the reason for this routine's existence.]
2070  */
2071 static void 
2072 vm_map_entry_unwire(map, entry)
2073         vm_map_t map;
2074         vm_map_entry_t entry;
2075 {
2076         vm_fault_unwire(map, entry->start, entry->end);
2077         entry->wired_count = 0;
2078 }
2079
2080 /*
2081  *      vm_map_entry_delete:    [ internal use only ]
2082  *
2083  *      Deallocate the given entry from the target map.
2084  */
2085 static void
2086 vm_map_entry_delete(map, entry)
2087         vm_map_t map;
2088         vm_map_entry_t entry;
2089 {
2090         vm_map_entry_unlink(map, entry);
2091         map->size -= entry->end - entry->start;
2092
2093         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
2094                 vm_object_deallocate(entry->object.vm_object);
2095         }
2096
2097         vm_map_entry_dispose(map, entry);
2098 }
2099
2100 /*
2101  *      vm_map_delete:  [ internal use only ]
2102  *
2103  *      Deallocates the given address range from the target
2104  *      map.
2105  */
2106 int
2107 vm_map_delete(map, start, end)
2108         vm_map_t map;
2109         vm_offset_t start;
2110         vm_offset_t end;
2111 {
2112         vm_object_t object;
2113         vm_map_entry_t entry;
2114         vm_map_entry_t first_entry;
2115
2116         /*
2117          * Find the start of the region, and clip it
2118          */
2119
2120 again:
2121         if (!vm_map_lookup_entry(map, start, &first_entry))
2122                 entry = first_entry->next;
2123         else {
2124                 entry = first_entry;
2125                 vm_map_clip_start(map, entry, start);
2126                 /*
2127                  * Fix the lookup hint now, rather than each time through the
2128                  * loop.
2129                  */
2130                 SAVE_HINT(map, entry->prev);
2131         }
2132
2133         /*
2134          * Save the free space hint
2135          */
2136
2137         if (entry == &map->header) {
2138                 map->first_free = &map->header;
2139         } else if (map->first_free->start >= start) {
2140                 map->first_free = entry->prev;
2141         }
2142
2143         /*
2144          * Step through all entries in this region
2145          */
2146
2147         while ((entry != &map->header) && (entry->start < end)) {
2148                 vm_map_entry_t next;
2149                 vm_offset_t s, e;
2150                 vm_pindex_t offidxstart, offidxend, count;
2151
2152                 /*
2153                  * If we hit an in-transition entry we have to sleep and
2154                  * retry.  It's easier (and not really slower) to just retry
2155                  * since this case occurs so rarely and the hint is already
2156                  * pointing at the right place.  We have to reset the
2157                  * start offset so as not to accidentally delete an entry
2158                  * another process just created in vacated space.
2159                  */
2160                 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
2161                         entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2162                         start = entry->start;
2163                         ++mycpu->gd_cnt.v_intrans_coll;
2164                         ++mycpu->gd_cnt.v_intrans_wait;
2165                         vm_map_transition_wait(map);
2166                         goto again;
2167                 }
2168                 vm_map_clip_end(map, entry, end);
2169
2170                 s = entry->start;
2171                 e = entry->end;
2172                 next = entry->next;
2173
2174                 offidxstart = OFF_TO_IDX(entry->offset);
2175                 count = OFF_TO_IDX(e - s);
2176                 object = entry->object.vm_object;
2177
2178                 /*
2179                  * Unwire before removing addresses from the pmap; otherwise,
2180                  * unwiring will put the entries back in the pmap.
2181                  */
2182                 if (entry->wired_count != 0) {
2183                         vm_map_entry_unwire(map, entry);
2184                 }
2185
2186                 offidxend = offidxstart + count;
2187
2188                 if ((object == kernel_object) || (object == kmem_object)) {
2189                         vm_object_page_remove(object, offidxstart, offidxend, FALSE);
2190                 } else {
2191                         pmap_remove(map->pmap, s, e);
2192                         if (object != NULL &&
2193                             object->ref_count != 1 &&
2194                             (object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING &&
2195                             (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
2196                                 vm_object_collapse(object);
2197                                 vm_object_page_remove(object, offidxstart, offidxend, FALSE);
2198                                 if (object->type == OBJT_SWAP) {
2199                                         swap_pager_freespace(object, offidxstart, count);
2200                                 }
2201                                 if (offidxend >= object->size &&
2202                                     offidxstart < object->size) {
2203                                         object->size = offidxstart;
2204                                 }
2205                         }
2206                 }
2207
2208                 /*
2209                  * Delete the entry (which may delete the object) only after
2210                  * removing all pmap entries pointing to its pages.
2211                  * (Otherwise, its page frames may be reallocated, and any
2212                  * modify bits will be set in the wrong object!)
2213                  */
2214                 vm_map_entry_delete(map, entry);
2215                 entry = next;
2216         }
2217         return (KERN_SUCCESS);
2218 }
2219
2220 /*
2221  *      vm_map_remove:
2222  *
2223  *      Remove the given address range from the target map.
2224  *      This is the exported form of vm_map_delete.
2225  */
2226 int
2227 vm_map_remove(map, start, end)
2228         vm_map_t map;
2229         vm_offset_t start;
2230         vm_offset_t end;
2231 {
2232         int result, s = 0;
2233
2234         if (map == kmem_map || map == mb_map)
2235                 s = splvm();
2236
2237         vm_map_lock(map);
2238         VM_MAP_RANGE_CHECK(map, start, end);
2239         result = vm_map_delete(map, start, end);
2240         vm_map_unlock(map);
2241
2242         if (map == kmem_map || map == mb_map)
2243                 splx(s);
2244
2245         return (result);
2246 }
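
/*
 * Example (illustrative only, not part of the original source): a hedged
 * sketch of a munmap(2)-style caller.  vm_map_remove() takes the map lock
 * itself, so the caller only has to supply a page-aligned range; the
 * wrapper name is hypothetical.
 */
#if 0
static int
example_unmap(struct proc *p, vm_offset_t addr, vm_size_t len)
{
	vm_map_t map = &p->p_vmspace->vm_map;

	return (vm_map_remove(map, trunc_page(addr), round_page(addr + len)));
}
#endif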
2247
2248 /*
2249  *      vm_map_check_protection:
2250  *
2251  *      Assert that the target map allows the specified
2252  *      privilege on the entire address region given.
2253  *      The entire region must be allocated.
2254  */
2255 boolean_t
2256 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
2257                         vm_prot_t protection)
2258 {
2259         vm_map_entry_t entry;
2260         vm_map_entry_t tmp_entry;
2261
2262         if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
2263                 return (FALSE);
2264         }
2265         entry = tmp_entry;
2266
2267         while (start < end) {
2268                 if (entry == &map->header) {
2269                         return (FALSE);
2270                 }
2271                 /*
2272                  * No holes allowed!
2273                  */
2274
2275                 if (start < entry->start) {
2276                         return (FALSE);
2277                 }
2278                 /*
2279                  * Check protection associated with entry.
2280                  */
2281
2282                 if ((entry->protection & protection) != protection) {
2283                         return (FALSE);
2284                 }
2285                 /* go to next entry */
2286
2287                 start = entry->end;
2288                 entry = entry->next;
2289         }
2290         return (TRUE);
2291 }
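
/*
 * Example (illustrative only, not part of the original source): a hedged
 * sketch of using vm_map_check_protection() to verify that a range is
 * readable and writable before starting an operation on it.  Note that
 * the check is advisory; protections can change once the map is unlocked.
 */
#if 0
static boolean_t
example_range_is_rw(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	return (vm_map_check_protection(map, start, end,
	    VM_PROT_READ | VM_PROT_WRITE));
}
#endif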
2292
2293 /*
2294  * Split the pages in a map entry into a new object.  This affords
2295  * easier removal of unused pages, and keeps object inheritance from
2296  * being a negative impact on memory usage.
2297  */
2298 static void
2299 vm_map_split(entry)
2300         vm_map_entry_t entry;
2301 {
2302         vm_page_t m;
2303         vm_object_t orig_object, new_object, source;
2304         vm_offset_t s, e;
2305         vm_pindex_t offidxstart, offidxend, idx;
2306         vm_size_t size;
2307         vm_ooffset_t offset;
2308
2309         orig_object = entry->object.vm_object;
2310         if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP)
2311                 return;
2312         if (orig_object->ref_count <= 1)
2313                 return;
2314
2315         offset = entry->offset;
2316         s = entry->start;
2317         e = entry->end;
2318
2319         offidxstart = OFF_TO_IDX(offset);
2320         offidxend = offidxstart + OFF_TO_IDX(e - s);
2321         size = offidxend - offidxstart;
2322
2323         new_object = vm_pager_allocate(orig_object->type,
2324                 NULL, IDX_TO_OFF(size), VM_PROT_ALL, 0LL);
2325         if (new_object == NULL)
2326                 return;
2327
2328         source = orig_object->backing_object;
2329         if (source != NULL) {
2330                 vm_object_reference(source);    /* Referenced by new_object */
2331                 LIST_INSERT_HEAD(&source->shadow_head,
2332                                   new_object, shadow_list);
2333                 vm_object_clear_flag(source, OBJ_ONEMAPPING);
2334                 new_object->backing_object_offset = 
2335                         orig_object->backing_object_offset + IDX_TO_OFF(offidxstart);
2336                 new_object->backing_object = source;
2337                 source->shadow_count++;
2338                 source->generation++;
2339         }
2340
2341         for (idx = 0; idx < size; idx++) {
2342                 vm_page_t m;
2343
2344         retry:
2345                 m = vm_page_lookup(orig_object, offidxstart + idx);
2346                 if (m == NULL)
2347                         continue;
2348
2349                 /*
2350                  * We must wait for pending I/O to complete before we can
2351                  * rename the page.
2352                  *
2353                  * We do not have to VM_PROT_NONE the page as mappings should
2354                  * not be changed by this operation.
2355                  */
2356                 if (vm_page_sleep_busy(m, TRUE, "spltwt"))
2357                         goto retry;
2358                         
2359                 vm_page_busy(m);
2360                 vm_page_rename(m, new_object, idx);
2361                 /* page automatically made dirty by rename and cache handled */
2362                 vm_page_busy(m);
2363         }
2364
2365         if (orig_object->type == OBJT_SWAP) {
2366                 vm_object_pip_add(orig_object, 1);
2367                 /*
2368                  * copy orig_object pages into new_object
2369                  * and destroy unneeded pages in
2370                  * shadow object.
2371                  */
2372                 swap_pager_copy(orig_object, new_object, offidxstart, 0);
2373                 vm_object_pip_wakeup(orig_object);
2374         }
2375
2376         for (idx = 0; idx < size; idx++) {
2377                 m = vm_page_lookup(new_object, idx);
2378                 if (m) {
2379                         vm_page_wakeup(m);
2380                 }
2381         }
2382
2383         entry->object.vm_object = new_object;
2384         entry->offset = 0LL;
2385         vm_object_deallocate(orig_object);
2386 }
2387
2388 /*
2389  *      vm_map_copy_entry:
2390  *
2391  *      Copies the contents of the source entry to the destination
2392  *      entry.  The entries *must* be aligned properly.
2393  */
2394 static void
2395 vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
2396         vm_map_t src_map, dst_map;
2397         vm_map_entry_t src_entry, dst_entry;
2398 {
2399         vm_object_t src_object;
2400
2401         if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP)
2402                 return;
2403
2404         if (src_entry->wired_count == 0) {
2405
2406                 /*
2407                  * If the source entry is marked needs_copy, it is already
2408                  * write-protected.
2409                  */
2410                 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
2411                         pmap_protect(src_map->pmap,
2412                             src_entry->start,
2413                             src_entry->end,
2414                             src_entry->protection & ~VM_PROT_WRITE);
2415                 }
2416
2417                 /*
2418                  * Make a copy of the object.
2419                  */
2420                 if ((src_object = src_entry->object.vm_object) != NULL) {
2421
2422                         if ((src_object->handle == NULL) &&
2423                                 (src_object->type == OBJT_DEFAULT ||
2424                                  src_object->type == OBJT_SWAP)) {
2425                                 vm_object_collapse(src_object);
2426                                 if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) {
2427                                         vm_map_split(src_entry);
2428                                         src_object = src_entry->object.vm_object;
2429                                 }
2430                         }
2431
2432                         vm_object_reference(src_object);
2433                         vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
2434                         dst_entry->object.vm_object = src_object;
2435                         src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
2436                         dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
2437                         dst_entry->offset = src_entry->offset;
2438                 } else {
2439                         dst_entry->object.vm_object = NULL;
2440                         dst_entry->offset = 0;
2441                 }
2442
2443                 pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
2444                     dst_entry->end - dst_entry->start, src_entry->start);
2445         } else {
2446                 /*
2447                  * Of course, wired down pages can't be set copy-on-write.
2448                  * Cause wired pages to be copied into the new map by
2449                  * simulating faults (the new pages are pageable)
2450                  */
2451                 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
2452         }
2453 }
2454
2455 /*
2456  * vmspace_fork:
2457  * Create a new process vmspace structure and vm_map
2458  * based on those of an existing process.  The new map
2459  * is based on the old map, according to the inheritance
2460  * values on the regions in that map.
2461  *
2462  * The source map must not be locked.
2463  */
2464 struct vmspace *
2465 vmspace_fork(vm1)
2466         struct vmspace *vm1;
2467 {
2468         struct vmspace *vm2;
2469         vm_map_t old_map = &vm1->vm_map;
2470         vm_map_t new_map;
2471         vm_map_entry_t old_entry;
2472         vm_map_entry_t new_entry;
2473         vm_object_t object;
2474
2475         vm_map_lock(old_map);
2476         old_map->infork = 1;
2477
2478         vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset);
2479         bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
2480             (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy);
2481         new_map = &vm2->vm_map; /* XXX */
2482         new_map->timestamp = 1;
2483
2484         old_entry = old_map->header.next;
2485
2486         while (old_entry != &old_map->header) {
2487                 if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
2488                         panic("vm_map_fork: encountered a submap");
2489
2490                 switch (old_entry->inheritance) {
2491                 case VM_INHERIT_NONE:
2492                         break;
2493
2494                 case VM_INHERIT_SHARE:
2495                         /*
2496                          * Clone the entry, creating the shared object if necessary.
2497                          */
2498                         object = old_entry->object.vm_object;
2499                         if (object == NULL) {
2500                                 object = vm_object_allocate(OBJT_DEFAULT,
2501                                         atop(old_entry->end - old_entry->start));
2502                                 old_entry->object.vm_object = object;
2503                                 old_entry->offset = (vm_offset_t) 0;
2504                         }
2505
2506                         /*
2507                          * Add the reference before calling vm_object_shadow
2508                          * to ensure that a shadow object is created.
2509                          */
2510                         vm_object_reference(object);
2511                         if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
2512                                 vm_object_shadow(&old_entry->object.vm_object,
2513                                         &old_entry->offset,
2514                                         atop(old_entry->end - old_entry->start));
2515                                 old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
2516                                 /* Transfer the second reference too. */
2517                                 vm_object_reference(
2518                                     old_entry->object.vm_object);
2519                                 vm_object_deallocate(object);
2520                                 object = old_entry->object.vm_object;
2521                         }
2522                         vm_object_clear_flag(object, OBJ_ONEMAPPING);
2523
2524                         /*
2525                          * Clone the entry, referencing the shared object.
2526                          */
2527                         new_entry = vm_map_entry_create(new_map);
2528                         *new_entry = *old_entry;
2529                         new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2530                         new_entry->wired_count = 0;
2531
2532                         /*
2533                          * Insert the entry into the new map -- we know we're
2534                          * inserting at the end of the new map.
2535                          */
2536
2537                         vm_map_entry_link(new_map, new_map->header.prev,
2538                             new_entry);
2539
2540                         /*
2541                          * Update the physical map
2542                          */
2543
2544                         pmap_copy(new_map->pmap, old_map->pmap,
2545                             new_entry->start,
2546                             (old_entry->end - old_entry->start),
2547                             old_entry->start);
2548                         break;
2549
2550                 case VM_INHERIT_COPY:
2551                         /*
2552                          * Clone the entry and link into the map.
2553                          */
2554                         new_entry = vm_map_entry_create(new_map);
2555                         *new_entry = *old_entry;
2556                         new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2557                         new_entry->wired_count = 0;
2558                         new_entry->object.vm_object = NULL;
2559                         vm_map_entry_link(new_map, new_map->header.prev,
2560                             new_entry);
2561                         vm_map_copy_entry(old_map, new_map, old_entry,
2562                             new_entry);
2563                         break;
2564                 }
2565                 old_entry = old_entry->next;
2566         }
2567
2568         new_map->size = old_map->size;
2569         old_map->infork = 0;
2570         vm_map_unlock(old_map);
2571
2572         return (vm2);
2573 }
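
/*
 * Example (illustrative only, not part of the original source): a hedged
 * sketch of how a fork path might use vmspace_fork() when the child is
 * to get a copy-on-write image of the parent's address space (i.e. RFMEM
 * is not requested).  The helper name is hypothetical and the remaining
 * fork-time bookkeeping is omitted.
 */
#if 0
static void
example_fork_vmspace(struct proc *parent, struct proc *child)
{
	child->p_vmspace = vmspace_fork(parent->p_vmspace);
	pmap_pinit2(vmspace_pmap(child->p_vmspace));
}
#endif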
2574
2575 int
2576 vm_map_stack (vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
2577               vm_prot_t prot, vm_prot_t max, int cow)
2578 {
2579         vm_map_entry_t prev_entry;
2580         vm_map_entry_t new_stack_entry;
2581         vm_size_t      init_ssize;
2582         int            rv;
2583
2584         if (VM_MIN_ADDRESS > 0 && addrbos < VM_MIN_ADDRESS)
2585                 return (KERN_NO_SPACE);
2586
2587         if (max_ssize < sgrowsiz)
2588                 init_ssize = max_ssize;
2589         else
2590                 init_ssize = sgrowsiz;
2591
2592         vm_map_lock(map);
2593
2594         /* If addr is already mapped, no go */
2595         if (vm_map_lookup_entry(map, addrbos, &prev_entry)) {
2596                 vm_map_unlock(map);
2597                 return (KERN_NO_SPACE);
2598         }
2599
2600         /* If we would blow our VMEM resource limit, no go */
2601         if (map->size + init_ssize >
2602             curproc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
2603                 vm_map_unlock(map);
2604                 return (KERN_NO_SPACE);
2605         }
2606
2607         /* If we can't accommodate max_ssize in the current mapping,
2608          * no go.  However, we need to be aware that subsequent user
2609          * mappings might map into the space we have reserved for
2610          * stack, and currently this space is not protected.  
2611          * 
2612          * Hopefully we will at least detect this condition 
2613          * when we try to grow the stack.
2614          */
2615         if ((prev_entry->next != &map->header) &&
2616             (prev_entry->next->start < addrbos + max_ssize)) {
2617                 vm_map_unlock(map);
2618                 return (KERN_NO_SPACE);
2619         }
2620
2621         /* We initially map a stack of only init_ssize.  We will
2622          * grow as needed later.  Since this is to be a grow 
2623          * down stack, we map at the top of the range.
2624          *
2625          * Note: we would normally expect prot and max to be
2626          * VM_PROT_ALL, and cow to be 0.  Possibly we should
2627          * eliminate these as input parameters, and just
2628          * pass these values here in the insert call.
2629          */
2630         rv = vm_map_insert(map, NULL, 0, addrbos + max_ssize - init_ssize,
2631                            addrbos + max_ssize, prot, max, cow);
2632
2633         /* Now set the avail_ssize amount */
2634         if (rv == KERN_SUCCESS){
2635                 if (prev_entry != &map->header)
2636                         vm_map_clip_end(map, prev_entry, addrbos + max_ssize - init_ssize);
2637                 new_stack_entry = prev_entry->next;
2638                 if (new_stack_entry->end   != addrbos + max_ssize ||
2639                     new_stack_entry->start != addrbos + max_ssize - init_ssize)
2640                         panic ("Bad entry start/end for new stack entry");
2641                 else 
2642                         new_stack_entry->avail_ssize = max_ssize - init_ssize;
2643         }
2644
2645         vm_map_unlock(map);
2646         return (rv);
2647 }
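
/*
 * Example (illustrative only, not part of the original source): a hedged
 * sketch of reserving a grow-down stack region with vm_map_stack().  Only
 * init_ssize (at most sgrowsiz) is mapped up front; the remainder is
 * grown on demand by vm_map_growstack().  The helper name is hypothetical.
 */
#if 0
static int
example_create_stack(vm_map_t map, vm_offset_t bottom, vm_size_t max_size)
{
	return (vm_map_stack(map, bottom, max_size, VM_PROT_ALL,
	    VM_PROT_ALL, 0));
}
#endif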
2648
2649 /* Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if the
2650  * desired address is already mapped, or if we successfully grow
2651  * the stack.  Also returns KERN_SUCCESS if addr is outside the
2652  * stack range (this is strange, but preserves compatibility with
2653  * the grow function in vm_machdep.c).
2654  */
2655 int
2656 vm_map_growstack (struct proc *p, vm_offset_t addr)
2657 {
2658         vm_map_entry_t prev_entry;
2659         vm_map_entry_t stack_entry;
2660         vm_map_entry_t new_stack_entry;
2661         struct vmspace *vm = p->p_vmspace;
2662         vm_map_t map = &vm->vm_map;
2663         vm_offset_t    end;
2664         int      grow_amount;
2665         int      rv = KERN_SUCCESS;
2666         int      is_procstack;
2667         int      use_read_lock = 1;
2668
2669 Retry:
2670         if (use_read_lock)
2671                 vm_map_lock_read(map);
2672         else
2673                 vm_map_lock(map);
2674
2675         /* If addr is already in the entry range, no need to grow.*/
2676         if (vm_map_lookup_entry(map, addr, &prev_entry))
2677                 goto done;
2678
2679         if ((stack_entry = prev_entry->next) == &map->header)
2680                 goto done;
2681         if (prev_entry == &map->header) 
2682                 end = stack_entry->start - stack_entry->avail_ssize;
2683         else
2684                 end = prev_entry->end;
2685
2686         /* This next test mimics the old grow function in vm_machdep.c.
2687          * It really doesn't quite make sense, but we do it anyway
2688          * for compatibility.
2689          *
2690          * If the stack is not growable, return success.  This signals
2691          * the caller to proceed as it normally would with ordinary vm.
2692          */
2693         if (stack_entry->avail_ssize < 1 ||
2694             addr >= stack_entry->start ||
2695             addr <  stack_entry->start - stack_entry->avail_ssize) {
2696                 goto done;
2697         } 
2698         
2699         /* Find the minimum grow amount */
2700         grow_amount = roundup (stack_entry->start - addr, PAGE_SIZE);
2701         if (grow_amount > stack_entry->avail_ssize) {
2702                 rv = KERN_NO_SPACE;
2703                 goto done;
2704         }
2705
2706         /* If there is no longer enough space between the entries,
2707          * fail and adjust the available space.  Note: this
2708          * should only happen if the user has mapped into the
2709          * stack area after the stack was created, and is
2710          * probably an error.
2711          *
2712          * This also effectively destroys any guard page the user
2713          * might have intended by limiting the stack size.
2714          */
2715         if (grow_amount > stack_entry->start - end) {
2716                 if (use_read_lock && vm_map_lock_upgrade(map)) {
2717                         use_read_lock = 0;
2718                         goto Retry;
2719                 }
2720                 use_read_lock = 0;
2721                 stack_entry->avail_ssize = stack_entry->start - end;
2722                 rv = KERN_NO_SPACE;
2723                 goto done;
2724         }
2725
2726         is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr;
2727
2728         /* If this is the main process stack, see if we're over the 
2729          * stack limit.
2730          */
2731         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
2732                              p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
2733                 rv = KERN_NO_SPACE;
2734                 goto done;
2735         }
2736
2737         /* Round up the grow amount modulo SGROWSIZ */
2738         grow_amount = roundup (grow_amount, sgrowsiz);
2739         if (grow_amount > stack_entry->avail_ssize) {
2740                 grow_amount = stack_entry->avail_ssize;
2741         }
2742         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
2743                              p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
2744                 grow_amount = p->p_rlimit[RLIMIT_STACK].rlim_cur -
2745                               ctob(vm->vm_ssize);
2746         }
2747
2748         /* If we would blow our VMEM resource limit, no go */
2749         if (map->size + grow_amount >
2750             curproc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
2751                 rv = KERN_NO_SPACE;
2752                 goto done;
2753         }
2754
2755         if (use_read_lock && vm_map_lock_upgrade(map)) {
2756                 use_read_lock = 0;
2757                 goto Retry;
2758         }
2759         use_read_lock = 0;
2760
2761         /* Get the preliminary new entry start value */
2762         addr = stack_entry->start - grow_amount;
2763
2764         /* If this puts us into the previous entry, cut back our growth
2765          * to the available space.  Also, see the note above.
2766          */
2767         if (addr < end) {
2768                 stack_entry->avail_ssize = stack_entry->start - end;
2769                 addr = end;
2770         }
2771
2772         rv = vm_map_insert(map, NULL, 0, addr, stack_entry->start,
2773                            VM_PROT_ALL,
2774                            VM_PROT_ALL,
2775                            0);
2776
2777         /* Adjust the available stack space by the amount we grew. */
2778         if (rv == KERN_SUCCESS) {
2779                 if (prev_entry != &map->header)
2780                         vm_map_clip_end(map, prev_entry, addr);
2781                 new_stack_entry = prev_entry->next;
2782                 if (new_stack_entry->end   != stack_entry->start  ||
2783                     new_stack_entry->start != addr)
2784                         panic ("Bad stack grow start/end in new stack entry");
2785                 else {
2786                         new_stack_entry->avail_ssize = stack_entry->avail_ssize -
2787                                                         (new_stack_entry->end -
2788                                                          new_stack_entry->start);
2789                         if (is_procstack)
2790                                 vm->vm_ssize += btoc(new_stack_entry->end -
2791                                                      new_stack_entry->start);
2792                 }
2793         }
2794
2795 done:
2796         if (use_read_lock)
2797                 vm_map_unlock_read(map);
2798         else
2799                 vm_map_unlock(map);
2800         return (rv);
2801 }
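
/*
 * Illustrative sketch (not compiled): how a page-fault path might use the
 * stack-grow routine above.  The helper name, and treating the entry point
 * as vm_map_growstack(), are assumptions for illustration rather than part
 * of this file.
 */
#if 0
static int
example_grow_stack(struct proc *p, vm_offset_t va)
{
        /*
         * The faulting address lies below the stack entry; try to grow
         * the stack before retrying the access.
         */
        if (vm_map_growstack(p, va) != KERN_SUCCESS)
                return (0);     /* let the fault fail normally */
        return (1);             /* grown; the caller can retry */
}
#endif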
2802
2803 /*
2804  * Unshare the specified VM space for exec.  If other processes are
2805  * mapped to it, then create a new one.  The new vmspace starts out empty.
2806  */
2807
2808 void
2809 vmspace_exec(struct proc *p) {
2810         struct vmspace *oldvmspace = p->p_vmspace;
2811         struct vmspace *newvmspace;
2812         vm_map_t map = &p->p_vmspace->vm_map;
2813
2814         newvmspace = vmspace_alloc(map->min_offset, map->max_offset);
2815         bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy,
2816             (caddr_t) (newvmspace + 1) - (caddr_t) &newvmspace->vm_startcopy);
2817         /*
2818          * This code is written like this for prototype purposes.  The
2819          * goal is to avoid running down the vmspace here, but let the
2820          * other processes that are still using the vmspace finally
2821          * run it down.  Even though there is little or no chance of blocking
2822          * here, it is a good idea to keep this form for future mods.
2823          */
2824         vmspace_free(oldvmspace);
2825         p->p_vmspace = newvmspace;
2826         pmap_pinit2(vmspace_pmap(newvmspace));
2827         if (p == curproc)
2828                 pmap_activate(p);
2829 }
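
/*
 * Illustrative sketch (not compiled): the exec-time caller alluded to
 * above.  The exec_new_vmspace() framing and the reference-count test are
 * assumptions about the caller, not something defined in this file.
 */
#if 0
        if (p->p_vmspace->vm_refcnt > 1) {
                /* The address space is shared; switch to a fresh one. */
                vmspace_exec(p);
        } else {
                /* Sole user: just wipe the existing map for the new image. */
                vm_map_remove(&p->p_vmspace->vm_map,
                    p->p_vmspace->vm_map.min_offset,
                    p->p_vmspace->vm_map.max_offset);
        }
#endif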
2830
2831 /*
2832  * Unshare the specified VM space for forcing COW.  This
2833  * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
2834  */
2835
2836 void
2837 vmspace_unshare(struct proc *p) {
2838         struct vmspace *oldvmspace = p->p_vmspace;
2839         struct vmspace *newvmspace;
2840
2841         if (oldvmspace->vm_refcnt == 1)
2842                 return;
2843         newvmspace = vmspace_fork(oldvmspace);
2844         vmspace_free(oldvmspace);
2845         p->p_vmspace = newvmspace;
2846         pmap_pinit2(vmspace_pmap(newvmspace));
2847         if (p == curproc)
2848                 pmap_activate(p);
2849 }
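
/*
 * Illustrative sketch (not compiled): the rfork() case mentioned in the
 * comment above.  When neither RFPROC nor RFMEM is requested, the calling
 * process keeps running but must stop sharing its address space, so the
 * (assumed) caller forces a copy-on-write duplicate:
 */
#if 0
        if ((flags & (RFMEM | RFPROC)) == 0) {
                vmspace_unshare(p);
                /* p now has a private, COW copy of its old address space */
        }
#endif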
2850         
2851
2852 /*
2853  *      vm_map_lookup:
2854  *
2855  *      Finds the VM object, offset, and
2856  *      protection for a given virtual address in the
2857  *      specified map, assuming a page fault of the
2858  *      type specified.
2859  *
2860  *      Leaves the map in question locked for read; return
2861  *      values are guaranteed until a vm_map_lookup_done
2862  *      call is performed.  Note that the map argument
2863  *      is in/out; the returned map must be used in
2864  *      the call to vm_map_lookup_done.
2865  *
2866  *      A handle (out_entry) is returned for use in
2867  *      vm_map_lookup_done, to make that fast.
2868  *
2869  *      If a lookup is requested with "write protection"
2870  *      specified, the map may be changed to perform virtual
2871  *      copying operations, although the data referenced will
2872  *      remain the same.
2873  */
2874 int
2875 vm_map_lookup(vm_map_t *var_map,                /* IN/OUT */
2876               vm_offset_t vaddr,
2877               vm_prot_t fault_typea,
2878               vm_map_entry_t *out_entry,        /* OUT */
2879               vm_object_t *object,              /* OUT */
2880               vm_pindex_t *pindex,              /* OUT */
2881               vm_prot_t *out_prot,              /* OUT */
2882               boolean_t *wired)                 /* OUT */
2883 {
2884         vm_map_entry_t entry;
2885         vm_map_t map = *var_map;
2886         vm_prot_t prot;
2887         vm_prot_t fault_type = fault_typea;
2888         int use_read_lock = 1;
2889         int rv = KERN_SUCCESS;
2890
2891 RetryLookup:
2892         if (use_read_lock)
2893                 vm_map_lock_read(map);
2894         else
2895                 vm_map_lock(map);
2896
2897         /*
2898          * If the map has an interesting hint, try it before calling the
2899          * full-blown lookup routine.
2900          */
2901         entry = map->hint;
2902         *out_entry = entry;
2903
2904         if ((entry == &map->header) ||
2905             (vaddr < entry->start) || (vaddr >= entry->end)) {
2906                 vm_map_entry_t tmp_entry;
2907
2908                 /*
2909                  * Entry was either not a valid hint, or the vaddr was not
2910                  * contained in the entry, so do a full lookup.
2911                  */
2912                 if (!vm_map_lookup_entry(map, vaddr, &tmp_entry)) {
2913                         rv = KERN_INVALID_ADDRESS;
2914                         goto done;
2915                 }
2916
2917                 entry = tmp_entry;
2918                 *out_entry = entry;
2919         }
2920         
2921         /*
2922          * Handle submaps.
2923          */
2924
2925         if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
2926                 vm_map_t old_map = map;
2927
2928                 *var_map = map = entry->object.sub_map;
2929                 if (use_read_lock)
2930                         vm_map_unlock_read(old_map);
2931                 else
2932                         vm_map_unlock(old_map);
2933                 use_read_lock = 1;
2934                 goto RetryLookup;
2935         }
2936
2937         /*
2938          * Check whether this task is allowed to have this page.
2939          * Note the special case for MAP_ENTRY_COW
2940          * pages with an override.  This is to implement a forced
2941          * COW for debuggers.
2942          */
2943
2944         if (fault_type & VM_PROT_OVERRIDE_WRITE)
2945                 prot = entry->max_protection;
2946         else
2947                 prot = entry->protection;
2948
2949         fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
2950         if ((fault_type & prot) != fault_type) {
2951                 rv = KERN_PROTECTION_FAILURE;
2952                 goto done;
2953         }
2954
2955         if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
2956             (entry->eflags & MAP_ENTRY_COW) &&
2957             (fault_type & VM_PROT_WRITE) &&
2958             (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) {
2959                 rv = KERN_PROTECTION_FAILURE;
2960                 goto done;
2961         }
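        /*
         * Example of the override case checked above: a debugger writing
         * to a wired copy-on-write mapping (e.g. through procfs) is
         * expected to pass VM_PROT_OVERRIDE_WRITE in fault_typea, so the
         * write is allowed and satisfied from a private copy rather than
         * rejected here.  (The procfs reference is illustrative.)
         */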
2962
2963         /*
2964          * If this page is not pageable, we have to get it for all possible
2965          * accesses.
2966          */
2967
2968         *wired = (entry->wired_count != 0);
2969         if (*wired)
2970                 prot = fault_type = entry->protection;
2971
2972         /*
2973          * If the entry was copy-on-write, we either shadow the object or demote the protection.
2974          */
2975
2976         if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
2977                 /*
2978                  * If we want to write the page, we may as well handle that
2979                  * now since we've got the map locked.
2980                  *
2981                  * If we don't need to write the page, we just demote the
2982                  * permissions allowed.
2983                  */
2984
2985                 if (fault_type & VM_PROT_WRITE) {
2986                         /*
2987                          * Make a new object, and place it in the object
2988                          * chain.  Note that no new references have appeared
2989                          * -- one just moved from the map to the new
2990                          * object.
2991                          */
2992
2993                         if (use_read_lock && vm_map_lock_upgrade(map)) {
2994                                 use_read_lock = 0;
2995                                 goto RetryLookup;
2996                         }
2997                         use_read_lock = 0;
2998
2999                         vm_object_shadow(
3000                             &entry->object.vm_object,
3001                             &entry->offset,
3002                             atop(entry->end - entry->start));
3003
3004                         entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
3005                 } else {
3006                         /*
3007                          * We're attempting to read a copy-on-write page --
3008                          * don't allow writes.
3009                          */
3010
3011                         prot &= ~VM_PROT_WRITE;
3012                 }
3013         }
3014
3015         /*
3016          * Create an object if necessary.
3017          */
3018         if (entry->object.vm_object == NULL &&
3019             !map->system_map) {
3020                 if (use_read_lock && vm_map_lock_upgrade(map))  {
3021                         use_read_lock = 0;
3022                         goto RetryLookup;
3023                 }
3024                 use_read_lock = 0;
3025                 entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
3026                     atop(entry->end - entry->start));
3027                 entry->offset = 0;
3028         }
3029
3030         /*
3031          * Return the object/offset from this entry.  If the entry was
3032          * copy-on-write or empty, it has been fixed up.
3033          */
3034
3035         *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
3036         *object = entry->object.vm_object;
3037
3038         /*
3039          * Return the protection computed above.  On
3040          * success we return with a read lock held on the map.  On failure
3041          * we return with the map unlocked.
3042          */
3043         *out_prot = prot;
3044 done:
3045         if (rv == KERN_SUCCESS) {
3046                 if (use_read_lock == 0)
3047                         vm_map_lock_downgrade(map);
3048         } else if (use_read_lock) {
3049                 vm_map_unlock_read(map);
3050         } else {
3051                 vm_map_unlock(map);
3052         }
3053         return (rv);
3054 }
3055
3056 /*
3057  *      vm_map_lookup_done:
3058  *
3059  *      Releases locks acquired by a vm_map_lookup
3060  *      (according to the handle returned by that lookup).
3061  */
3062
3063 void
3064 vm_map_lookup_done(map, entry)
3065         vm_map_t map;
3066         vm_map_entry_t entry;
3067 {
3068         /*
3069          * Unlock the main-level map
3070          */
3071
3072         vm_map_unlock_read(map);
3073 }
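
/*
 * Illustrative sketch (not compiled) of the vm_map_lookup() /
 * vm_map_lookup_done() pairing described above.  The surrounding locals
 * are hypothetical; note that the possibly-updated map pointer returned
 * by the lookup is the one that must be handed to vm_map_lookup_done().
 */
#if 0
        vm_map_t map = original_map;    /* may be replaced by a submap */
        vm_map_entry_t entry;
        vm_object_t object;
        vm_pindex_t pindex;
        vm_prot_t prot;
        boolean_t wired;

        if (vm_map_lookup(&map, vaddr, VM_PROT_READ, &entry,
                          &object, &pindex, &prot, &wired) != KERN_SUCCESS)
                return (EFAULT);
        /* ... use object/pindex while the map stays read-locked ... */
        vm_map_lookup_done(map, entry);
#endif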
3074
3075 /*
3076  * Implement uiomove with VM operations.  This (and its collateral changes)
3077  * supports every combination of source object modification and COW-type
3078  * operations.
3079  */
3080 int
3081 vm_uiomove(mapa, srcobject, cp, cnta, uaddra, npages)
3082         vm_map_t mapa;
3083         vm_object_t srcobject;
3084         off_t cp;
3085         int cnta;
3086         vm_offset_t uaddra;
3087         int *npages;
3088 {
3089         vm_map_t map;
3090         vm_object_t first_object, oldobject, object;
3091         vm_map_entry_t entry;
3092         vm_prot_t prot;
3093         boolean_t wired;
3094         int tcnt, rv;
3095         vm_offset_t uaddr, start, end, tend;
3096         vm_pindex_t first_pindex, osize, oindex;
3097         off_t ooffset;
3098         int cnt;
3099
3100         if (npages)
3101                 *npages = 0;
3102
3103         cnt = cnta;
3104         uaddr = uaddra;
3105
3106         while (cnt > 0) {
3107                 map = mapa;
3108
3109                 if ((vm_map_lookup(&map, uaddr,
3110                         VM_PROT_READ, &entry, &first_object,
3111                         &first_pindex, &prot, &wired)) != KERN_SUCCESS) {
3112                         return EFAULT;
3113                 }
3114
3115                 vm_map_clip_start(map, entry, uaddr);
3116
3117                 tcnt = cnt;
3118                 tend = uaddr + tcnt;
3119                 if (tend > entry->end) {
3120                         tcnt = entry->end - uaddr;
3121                         tend = entry->end;
3122                 }
3123
3124                 vm_map_clip_end(map, entry, tend);
3125
3126                 start = entry->start;
3127                 end = entry->end;
3128
3129                 osize = atop(tcnt);
3130
3131                 oindex = OFF_TO_IDX(cp);
3132                 if (npages) {
3133                         vm_pindex_t idx;
3134                         for (idx = 0; idx < osize; idx++) {
3135                                 vm_page_t m;
3136                                 if ((m = vm_page_lookup(srcobject, oindex + idx)) == NULL) {
3137                                         vm_map_lookup_done(map, entry);
3138                                         return 0;
3139                                 }
3140                                 /*
3141                                  * disallow busy or invalid pages, but allow
3142                                  * m->busy pages if they are entirely valid.
3143                                  */
3144                                 if ((m->flags & PG_BUSY) ||
3145                                         ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL)) {
3146                                         vm_map_lookup_done(map, entry);
3147                                         return 0;
3148                                 }
3149                         }
3150                 }
3151
3152 /*
3153  * If we are changing an existing map entry, just redirect
3154  * the object, and change mappings.
3155  */
3156                 if ((first_object->type == OBJT_VNODE) &&
3157                         ((oldobject = entry->object.vm_object) == first_object)) {
3158
3159                         if ((entry->offset != cp) || (oldobject != srcobject)) {
3160                                 /*
3161                                  * Remove old window into the file
3162                                  */
3163                                 pmap_remove (map->pmap, uaddr, tend);
3164
3165                                 /*
3166                                  * Force copy on write for mmaped regions
3167                                  */
3168                                 vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);
3169
3170                                 /*
3171                                  * Point the object appropriately
3172                                  */
3173                                 if (oldobject != srcobject) {
3174
3175                                 /*
3176                                  * Set the object optimization hint flag
3177                                  */
3178                                         vm_object_set_flag(srcobject, OBJ_OPT);
3179                                         vm_object_reference(srcobject);
3180                                         entry->object.vm_object = srcobject;
3181
3182                                         if (oldobject) {
3183                                                 vm_object_deallocate(oldobject);
3184                                         }
3185                                 }
3186
3187                                 entry->offset = cp;
3188                                 map->timestamp++;
3189                         } else {
3190                                 pmap_remove (map->pmap, uaddr, tend);
3191                         }
3192
3193                 } else if ((first_object->ref_count == 1) &&
3194                         (first_object->size == osize) &&
3195                         ((first_object->type == OBJT_DEFAULT) ||
3196                                 (first_object->type == OBJT_SWAP)) ) {
3197
3198                         oldobject = first_object->backing_object;
3199
3200                         if ((first_object->backing_object_offset != cp) ||
3201                                 (oldobject != srcobject)) {
3202                                 /*
3203                                  * Remove old window into the file
3204                                  */
3205                                 pmap_remove (map->pmap, uaddr, tend);
3206
3207                                 /*
3208                                  * Remove unneeded old pages
3209                                  */
3210                                 vm_object_page_remove(first_object, 0, 0, 0);
3211
3212                                 /*
3213                                  * Invalidate swap space
3214                                  */
3215                                 if (first_object->type == OBJT_SWAP) {
3216                                         swap_pager_freespace(first_object,
3217                                                 0,
3218                                                 first_object->size);
3219                                 }
3220
3221                                 /*
3222                                  * Force copy on write for mmaped regions
3223                                  */
3224                                 vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);
3225
3226                                 /*
3227                                  * Point the object appropriately
3228                                  */
3229                                 if (oldobject != srcobject) {
3230
3231                                 /*
3232                                  * Set the object optimization hint flag
3233                                  */
3234                                         vm_object_set_flag(srcobject, OBJ_OPT);
3235                                         vm_object_reference(srcobject);
3236
3237                                         if (oldobject) {
3238                                                 LIST_REMOVE(
3239                                                         first_object, shadow_list);
3240                                                 oldobject->shadow_count--;
3241                                                 /* XXX bump generation? */
3242                                                 vm_object_deallocate(oldobject);
3243                                         }
3244
3245                                         LIST_INSERT_HEAD(&srcobject->shadow_head,
3246                                                 first_object, shadow_list);
3247                                         srcobject->shadow_count++;
3248                                         /* XXX bump generation? */
3249
3250                                         first_object->backing_object = srcobject;
3251                                 }
3252                                 first_object->backing_object_offset = cp;
3253                                 map->timestamp++;
3254                         } else {
3255                                 pmap_remove (map->pmap, uaddr, tend);
3256                         }
3257 /*
3258  * Otherwise, we have to do a logical mmap.
3259  */
3260                 } else {
3261
3262                         vm_object_set_flag(srcobject, OBJ_OPT);
3263                         vm_object_reference(srcobject);
3264
3265                         pmap_remove (map->pmap, uaddr, tend);
3266
3267                         vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);
3268                         vm_map_lock_upgrade(map);
3269
3270                         if (entry == &map->header) {
3271                                 map->first_free = &map->header;
3272                         } else if (map->first_free->start >= start) {
3273                                 map->first_free = entry->prev;
3274                         }
3275
3276                         SAVE_HINT(map, entry->prev);
3277                         vm_map_entry_delete(map, entry);
3278
3279                         object = srcobject;
3280                         ooffset = cp;
3281
3282                         rv = vm_map_insert(map, object, ooffset, start, tend,
3283                                 VM_PROT_ALL, VM_PROT_ALL, MAP_COPY_ON_WRITE);
3284
3285                         if (rv != KERN_SUCCESS)
3286                                 panic("vm_uiomove: could not insert new entry: %d", rv);
3287                 }
3288
3289 /*
3290  * Map the window directly, if it is already in memory
3291  */
3292                 pmap_object_init_pt(map->pmap, uaddr,
3293                         srcobject, oindex, tcnt, 0);
3294
3295                 map->timestamp++;
3296                 vm_map_unlock(map);
3297
3298                 cnt -= tcnt;
3299                 uaddr += tcnt;
3300                 cp += tcnt;
3301                 if (npages)
3302                         *npages += osize;
3303         }
3304         return 0;
3305 }
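
/*
 * Illustrative sketch (not compiled): a hypothetical zero-copy read path
 * using vm_uiomove() to splice a vnode object's pages over the user
 * buffer instead of copying them.  The vp/uio/cnt names and the caller
 * framing are assumptions, not part of this file.
 */
#if 0
        int npages = 0;
        int error;

        error = vm_uiomove(&curproc->p_vmspace->vm_map, vp->v_object,
                           uio->uio_offset, cnt,
                           (vm_offset_t)uio->uio_iov->iov_base, &npages);
        if (error == 0 && npages > 0) {
                /* pages were remapped; advance the uio without a copy */
        }
#endif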
3306
3307 /*
3308  * Performs the copy-on-write operations necessary to allow the virtual copies
3309  * into user space to work.  This has to be called for write(2) system calls
3310  * from other processes, file unlinking, and file size shrinkage.
3311  */
3312 void
3313 vm_freeze_copyopts(object, froma, toa)
3314         vm_object_t object;
3315         vm_pindex_t froma, toa;
3316 {
3317         int rv;
3318         vm_object_t robject;
3319         vm_pindex_t idx;
3320
3321         if ((object == NULL) ||
3322                 ((object->flags & OBJ_OPT) == 0))
3323                 return;
3324
3325         if (object->shadow_count > object->ref_count)
3326                 panic("vm_freeze_copyopts: sc > rc");
3327
3328         while((robject = LIST_FIRST(&object->shadow_head)) != NULL) {
3329                 vm_pindex_t bo_pindex;
3330                 vm_page_t m_in, m_out;
3331
3332                 bo_pindex = OFF_TO_IDX(robject->backing_object_offset);
3333
3334                 vm_object_reference(robject);
3335
3336                 vm_object_pip_wait(robject, "objfrz");
3337
3338                 if (robject->ref_count == 1) {
3339                         vm_object_deallocate(robject);
3340                         continue;
3341                 }
3342
3343                 vm_object_pip_add(robject, 1);
3344
3345                 for (idx = 0; idx < robject->size; idx++) {
3346
3347                         m_out = vm_page_grab(robject, idx,
3348                                                 VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
3349
3350                         if (m_out->valid == 0) {
3351                                 m_in = vm_page_grab(object, bo_pindex + idx,
3352                                                 VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
3353                                 if (m_in->valid == 0) {
3354                                         rv = vm_pager_get_pages(object, &m_in, 1, 0);
3355                                         if (rv != VM_PAGER_OK) {
3356                                                 printf("vm_freeze_copyopts: cannot read page from file: %lx\n", (long)m_in->pindex);
3357                                                 continue;
3358                                         }
3359                                         vm_page_deactivate(m_in);
3360                                 }
3361
3362                                 vm_page_protect(m_in, VM_PROT_NONE);
3363                                 pmap_copy_page(VM_PAGE_TO_PHYS(m_in), VM_PAGE_TO_PHYS(m_out));
3364                                 m_out->valid = m_in->valid;
3365                                 vm_page_dirty(m_out);
3366                                 vm_page_activate(m_out);
3367                                 vm_page_wakeup(m_in);
3368                         }
3369                         vm_page_wakeup(m_out);
3370                 }
3371
3372                 object->shadow_count--;
3373                 object->ref_count--;
3374                 LIST_REMOVE(robject, shadow_list);
3375                 robject->backing_object = NULL;
3376                 robject->backing_object_offset = 0;
3377
3378                 vm_object_pip_wakeup(robject);
3379                 vm_object_deallocate(robject);
3380         }
3381
3382         vm_object_clear_flag(object, OBJ_OPT);
3383 }
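
/*
 * Illustrative sketch (not compiled): the kind of call site described in
 * the comment above.  Before a write(2) modifies a vnode's pages, any
 * OBJ_OPT shadows are frozen so previously spliced user mappings keep
 * seeing the old contents.  The vp/uio names are assumptions.
 */
#if 0
        if (vp->v_object != NULL) {
                vm_freeze_copyopts(vp->v_object,
                    OFF_TO_IDX(uio->uio_offset),
                    OFF_TO_IDX(uio->uio_offset + uio->uio_resid));
        }
#endif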
3384
3385 #include "opt_ddb.h"
3386 #ifdef DDB
3387 #include <sys/kernel.h>
3388
3389 #include <ddb/ddb.h>
3390
3391 /*
3392  *      vm_map_print:   [ debug ]
3393  */
3394 DB_SHOW_COMMAND(map, vm_map_print)
3395 {
3396         static int nlines;
3397         /* XXX convert args. */
3398         vm_map_t map = (vm_map_t)addr;
3399         boolean_t full = have_addr;
3400
3401         vm_map_entry_t entry;
3402
3403         db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
3404             (void *)map,
3405             (void *)map->pmap, map->nentries, map->timestamp);
3406         nlines++;
3407
3408         if (!full && db_indent)
3409                 return;
3410
3411         db_indent += 2;
3412         for (entry = map->header.next; entry != &map->header;
3413             entry = entry->next) {
3414                 db_iprintf("map entry %p: start=%p, end=%p\n",
3415                     (void *)entry, (void *)entry->start, (void *)entry->end);
3416                 nlines++;
3417                 {
3418                         static char *inheritance_name[4] =
3419                         {"share", "copy", "none", "donate_copy"};
3420
3421                         db_iprintf(" prot=%x/%x/%s",
3422                             entry->protection,
3423                             entry->max_protection,
3424                             inheritance_name[(int)(unsigned char)entry->inheritance]);
3425                         if (entry->wired_count != 0)
3426                                 db_printf(", wired");
3427                 }
3428                 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
3429                         /* XXX no %qd in kernel.  Truncate entry->offset. */
3430                         db_printf(", share=%p, offset=0x%lx\n",
3431                             (void *)entry->object.sub_map,
3432                             (long)entry->offset);
3433                         nlines++;
3434                         if ((entry->prev == &map->header) ||
3435                             (entry->prev->object.sub_map !=
3436                                 entry->object.sub_map)) {
3437                                 db_indent += 2;
3438                                 vm_map_print((db_expr_t)(intptr_t)
3439                                              entry->object.sub_map,
3440                                              full, 0, (char *)0);
3441                                 db_indent -= 2;
3442                         }
3443                 } else {
3444                         /* XXX no %qd in kernel.  Truncate entry->offset. */
3445                         db_printf(", object=%p, offset=0x%lx",
3446                             (void *)entry->object.vm_object,
3447                             (long)entry->offset);
3448                         if (entry->eflags & MAP_ENTRY_COW)
3449                                 db_printf(", copy (%s)",
3450                                     (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
3451                         db_printf("\n");
3452                         nlines++;
3453
3454                         if ((entry->prev == &map->header) ||
3455                             (entry->prev->object.vm_object !=
3456                                 entry->object.vm_object)) {
3457                                 db_indent += 2;
3458                                 vm_object_print((db_expr_t)(intptr_t)
3459                                                 entry->object.vm_object,
3460                                                 full, 0, (char *)0);
3461                                 nlines += 4;
3462                                 db_indent -= 2;
3463                         }
3464                 }
3465         }
3466         db_indent -= 2;
3467         if (db_indent == 0)
3468                 nlines = 0;
3469 }
3470
3471
3472 DB_SHOW_COMMAND(procvm, procvm)
3473 {
3474         struct proc *p;
3475
3476         if (have_addr) {
3477                 p = (struct proc *) addr;
3478         } else {
3479                 p = curproc;
3480         }
3481
3482         db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
3483             (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
3484             (void *)vmspace_pmap(p->p_vmspace));
3485
3486         vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL);
3487 }
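
/*
 * From the in-kernel debugger these are invoked as, for example:
 *
 *      db> show procvm
 *      db> show map 0xc12fe000
 *
 * (the map address above is illustrative; "show procvm" with no address
 * dumps the map of the current process).
 */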
3488
3489 #endif /* DDB */