/*
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: @(#)vm_map.c      8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_map.c,v 1.187.2.19 2003/05/27 00:47:02 alc Exp $
 */

/*
 *      Virtual memory mapping module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/swap_pager.h>
#include <vm/vm_zone.h>

/*
 *      Virtual memory maps provide for the mapping, protection,
 *      and sharing of virtual memory objects.  In addition,
 *      this module provides for an efficient virtual copy of
 *      memory from one map to another.
 *
 *      Synchronization is required prior to most operations.
 *
 *      Maps consist of an ordered doubly-linked list of simple
 *      entries; a single hint is used to speed up lookups.
 *
 *      Since portions of maps are specified by start/end addresses,
 *      which may not align with existing map entries, all
 *      routines merely "clip" entries to these start/end values.
 *      [That is, an entry is split into two, bordering at a
 *      start or end value.]  Note that these clippings may not
 *      always be necessary (as the two resulting entries are then
 *      not changed); however, the clipping is done for convenience.
 *
 *      As mentioned above, virtual copy operations are performed
 *      by copying VM object references from one map to
 *      another, and then marking both regions as copy-on-write.
 */

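/*
 * Illustrative sketch of the clipping described above (not compiled;
 * the map and addresses are hypothetical).  Given an entry spanning
 * [0x1000, 0x4000) and a request covering [0x2000, 0x3000), clipping
 * splits the entry so one entry exactly covers the requested range.
 */
#if 0
        vm_map_entry_t entry;

        vm_map_lock(map);
        if (vm_map_lookup_entry(map, 0x2000, &entry)) {
                vm_map_clip_start(map, entry, 0x2000); /* splits off [0x1000,0x2000) */
                vm_map_clip_end(map, entry, 0x3000);   /* splits off [0x3000,0x4000) */
                /* entry now covers exactly [0x2000, 0x3000) */
        }
        vm_map_unlock(map);
#endif
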
/*
 *      vm_map_startup:
 *
 *      Initialize the vm_map module.  Must be called before
 *      any other vm_map routines.
 *
 *      Map and entry structures are allocated from the general
 *      purpose memory pool with some exceptions:
 *
 *      - The kernel map and kmem submap are allocated statically.
 *      - Kernel map entries are allocated out of a static pool.
 *
 *      These restrictions are necessary since malloc() uses the
 *      maps and requires map entries.
 */

static struct vm_zone kmapentzone_store, mapentzone_store, mapzone_store;
static vm_zone_t mapentzone, kmapentzone, mapzone, vmspace_zone;
static struct vm_object kmapentobj, mapentobj, mapobj;

static struct vm_map_entry map_entry_init[MAX_MAPENT];
static struct vm_map_entry kmap_entry_init[MAX_KMAPENT];
static struct vm_map map_init[MAX_KMAP];

static void _vm_map_clip_end __P((vm_map_t, vm_map_entry_t, vm_offset_t));
static void _vm_map_clip_start __P((vm_map_t, vm_map_entry_t, vm_offset_t));
static vm_map_entry_t vm_map_entry_create __P((vm_map_t));
static void vm_map_entry_delete __P((vm_map_t, vm_map_entry_t));
static void vm_map_entry_dispose __P((vm_map_t, vm_map_entry_t));
static void vm_map_entry_unwire __P((vm_map_t, vm_map_entry_t));
static void vm_map_copy_entry __P((vm_map_t, vm_map_t, vm_map_entry_t,
                vm_map_entry_t));
static void vm_map_split __P((vm_map_entry_t));
static void vm_map_unclip_range __P((vm_map_t map, vm_map_entry_t start_entry,
                vm_offset_t start, vm_offset_t end, int flags));

void
vm_map_startup()
{
        mapzone = &mapzone_store;
        zbootinit(mapzone, "MAP", sizeof (struct vm_map),
                map_init, MAX_KMAP);
        kmapentzone = &kmapentzone_store;
        zbootinit(kmapentzone, "KMAP ENTRY", sizeof (struct vm_map_entry),
                kmap_entry_init, MAX_KMAPENT);
        mapentzone = &mapentzone_store;
        zbootinit(mapentzone, "MAP ENTRY", sizeof (struct vm_map_entry),
                map_entry_init, MAX_MAPENT);
}

/*
 * Allocate a vmspace structure, including a vm_map and pmap,
 * and initialize those structures.  The refcnt is set to 1.
 * The remaining fields must be initialized by the caller.
 */
struct vmspace *
vmspace_alloc(min, max)
        vm_offset_t min, max;
{
        struct vmspace *vm;

        vm = zalloc(vmspace_zone);
        vm_map_init(&vm->vm_map, min, max);
        pmap_pinit(vmspace_pmap(vm));
        vm->vm_map.pmap = vmspace_pmap(vm);             /* XXX */
        vm->vm_refcnt = 1;
        vm->vm_shm = NULL;
        vm->vm_exitingcnt = 0;
        return (vm);
}

void
vm_init2(void)
{
        zinitna(kmapentzone, &kmapentobj,
                NULL, 0, lmin((VM_MAX_KERNEL_ADDRESS - KERNBASE) / PAGE_SIZE,
                cnt.v_page_count) / 8, ZONE_INTERRUPT, 1);
        zinitna(mapentzone, &mapentobj,
                NULL, 0, 0, 0, 1);
        zinitna(mapzone, &mapobj,
                NULL, 0, 0, 0, 1);
        vmspace_zone = zinit("VMSPACE", sizeof (struct vmspace), 0, 0, 3);
        pmap_init2();
        vm_object_init2();
}

static __inline void
vmspace_dofree(struct vmspace *vm)
{
        /*
         * Lock the map, to wait out all other references to it.
         * Delete all of the mappings and pages they hold, then call
         * the pmap module to reclaim anything left.
         */
        vm_map_lock(&vm->vm_map);
        (void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
            vm->vm_map.max_offset);
        vm_map_unlock(&vm->vm_map);

        pmap_release(vmspace_pmap(vm));
        zfree(vmspace_zone, vm);
}

void
vmspace_free(struct vmspace *vm)
{
        if (vm->vm_refcnt == 0)
                panic("vmspace_free: attempt to free already freed vmspace");

        if (--vm->vm_refcnt == 0 && vm->vm_exitingcnt == 0)
                vmspace_dofree(vm);
}

void
vmspace_exitfree(struct proc *p)
{
        struct vmspace *vm;

        vm = p->p_vmspace;
        p->p_vmspace = NULL;

        /*
         * cleanup by parent process wait()ing on exiting child.  vm_refcnt
         * may not be 0 (e.g. fork() and child exits without exec()ing).
         * exitingcnt may increment above 0 and drop back down to zero
         * several times while vm_refcnt is held non-zero.  vm_refcnt
         * may also increment above 0 and drop back down to zero several
         * times while vm_exitingcnt is held non-zero.
         *
         * The last wait on the exiting child's vmspace will clean up
         * the remainder of the vmspace.
         */
        if (--vm->vm_exitingcnt == 0 && vm->vm_refcnt == 0)
                vmspace_dofree(vm);
}

/*
 * vmspace_swap_count() - count the approximate swap usage in pages for a
 *                        vmspace.
 *
 *      Swap usage is determined by taking the proportional swap used by
 *      VM objects backing the VM map.  To make up for fractional losses,
 *      if the VM object has any swap use at all the associated map entries
 *      count for at least 1 swap page.
 */
int
vmspace_swap_count(struct vmspace *vmspace)
{
        vm_map_t map = &vmspace->vm_map;
        vm_map_entry_t cur;
        int count = 0;

        for (cur = map->header.next; cur != &map->header; cur = cur->next) {
                vm_object_t object;

                if ((cur->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
                    (object = cur->object.vm_object) != NULL &&
                    object->type == OBJT_SWAP
                ) {
                        int n = (cur->end - cur->start) / PAGE_SIZE;

                        if (object->un_pager.swp.swp_bcount) {
                                count += object->un_pager.swp.swp_bcount *
                                    SWAP_META_PAGES * n / object->size + 1;
                        }
                }
        }
        return(count);
}

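/*
 * Worked example of the proportional formula above (hypothetical
 * numbers, assuming SWAP_META_PAGES is 16 on this configuration): an
 * OBJT_SWAP object of size 1024 pages with swp_bcount = 4, backing a
 * 256 page map entry, contributes 4 * 16 * 256 / 1024 + 1 = 17 pages
 * to the count.
 */
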
/*
 *      vm_map_create:
 *
 *      Creates and returns a new empty VM map with
 *      the given physical map structure, and having
 *      the given lower and upper address bounds.
 */
vm_map_t
vm_map_create(pmap, min, max)
        pmap_t pmap;
        vm_offset_t min, max;
{
        vm_map_t result;

        result = zalloc(mapzone);
        vm_map_init(result, min, max);
        result->pmap = pmap;
        return (result);
}

/*
 * Initialize an existing vm_map structure
 * such as that in the vmspace structure.
 * The pmap is set elsewhere.
 */
void
vm_map_init(map, min, max)
        struct vm_map *map;
        vm_offset_t min, max;
{
        map->header.next = map->header.prev = &map->header;
        map->nentries = 0;
        map->size = 0;
        map->system_map = 0;
        map->infork = 0;
        map->min_offset = min;
        map->max_offset = max;
        map->first_free = &map->header;
        map->hint = &map->header;
        map->timestamp = 0;
        lockinit(&map->lock, PVM, "thrd_sleep", 0, LK_NOPAUSE);
}

/*
 *      vm_map_entry_dispose:   [ internal use only ]
 *
 *      Inverse of vm_map_entry_create.
 */
static void
vm_map_entry_dispose(map, entry)
        vm_map_t map;
        vm_map_entry_t entry;
{
        if (map->system_map || !mapentzone)
                zfreei(kmapentzone, entry);
        else
                zfree(mapentzone, entry);
}

/*
 *      vm_map_entry_create:    [ internal use only ]
 *
 *      Allocates a VM map entry for insertion.
 *      No entry fields are filled in.
 */
static vm_map_entry_t
vm_map_entry_create(map)
        vm_map_t map;
{
        vm_map_entry_t new_entry;

        if (map->system_map || !mapentzone)
                new_entry = zalloci(kmapentzone);
        else
                new_entry = zalloc(mapentzone);
        if (new_entry == NULL)
            panic("vm_map_entry_create: kernel resources exhausted");
        return(new_entry);
}

/*
 *      vm_map_entry_{un,}link:
 *
 *      Insert/remove entries from maps.
 */
static __inline void
vm_map_entry_link(vm_map_t map,
                  vm_map_entry_t after_where,
                  vm_map_entry_t entry)
{
        map->nentries++;
        entry->prev = after_where;
        entry->next = after_where->next;
        entry->next->prev = entry;
        after_where->next = entry;
}

static __inline void
vm_map_entry_unlink(vm_map_t map,
                    vm_map_entry_t entry)
{
        vm_map_entry_t prev;
        vm_map_entry_t next;

        if (entry->eflags & MAP_ENTRY_IN_TRANSITION)
                panic("vm_map_entry_unlink: attempt to mess with locked entry! %p", entry);
        prev = entry->prev;
        next = entry->next;
        next->prev = prev;
        prev->next = next;
        map->nentries--;
}

/*
 *      SAVE_HINT:
 *
 *      Saves the specified entry as the hint for
 *      future lookups.
 */
#define SAVE_HINT(map,value) \
                (map)->hint = (value);

/*
 *      vm_map_lookup_entry:    [ internal use only ]
 *
 *      Finds the map entry containing (or
 *      immediately preceding) the specified address
 *      in the given map; the entry is returned
 *      in the "entry" parameter.  The boolean
 *      result indicates whether the address is
 *      actually contained in the map.
 */
boolean_t
vm_map_lookup_entry(map, address, entry)
        vm_map_t map;
        vm_offset_t address;
        vm_map_entry_t *entry;  /* OUT */
{
        vm_map_entry_t cur;
        vm_map_entry_t last;

        /*
         * Start looking either from the head of the list, or from the hint.
         */

        cur = map->hint;

        if (cur == &map->header)
                cur = cur->next;

        if (address >= cur->start) {
                /*
                 * Go from hint to end of list.
                 *
                 * But first, make a quick check to see if we are already looking
                 * at the entry we want (which is usually the case). Note also
                 * that we don't need to save the hint here... it is the same
                 * hint (unless we are at the header, in which case the hint
                 * didn't buy us anything anyway).
                 */
                last = &map->header;
                if ((cur != last) && (cur->end > address)) {
                        *entry = cur;
                        return (TRUE);
                }
        } else {
                /*
                 * Go from start to hint, *inclusively*
                 */
                last = cur->next;
                cur = map->header.next;
        }

        /*
         * Search linearly
         */

        while (cur != last) {
                if (cur->end > address) {
                        if (address >= cur->start) {
                                /*
                                 * Save this lookup for future hints, and
                                 * return
                                 */

                                *entry = cur;
                                SAVE_HINT(map, cur);
                                return (TRUE);
                        }
                        break;
                }
                cur = cur->next;
        }
        *entry = cur->prev;
        SAVE_HINT(map, *entry);
        return (FALSE);
}

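/*
 * Usage sketch (illustrative, not compiled; "map" and "addr" are
 * hypothetical): the boolean result distinguishes an address inside
 * the returned entry from an address in a hole preceding it.
 */
#if 0
        vm_map_entry_t entry;

        if (vm_map_lookup_entry(map, addr, &entry)) {
                /* addr lies within [entry->start, entry->end) */
        } else {
                /*
                 * addr falls in a hole; entry is the entry preceding the
                 * hole, or &map->header if addr precedes the first entry.
                 */
        }
#endif
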
/*
 *      vm_map_insert:
 *
 *      Inserts the given whole VM object into the target
 *      map at the specified address range.  The object's
 *      size should match that of the address range.
 *
 *      Requires that the map be locked, and leaves it so.
 *
 *      If object is non-NULL, ref count must be bumped by caller
 *      prior to making call to account for the new entry.
 */
int
vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
              vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max,
              int cow)
{
        vm_map_entry_t new_entry;
        vm_map_entry_t prev_entry;
        vm_map_entry_t temp_entry;
        vm_eflags_t protoeflags;

        /*
         * Check that the start and end points are not bogus.
         */

        if ((start < map->min_offset) || (end > map->max_offset) ||
            (start >= end))
                return (KERN_INVALID_ADDRESS);

        /*
         * Find the entry prior to the proposed starting address; if it's part
         * of an existing entry, this range is bogus.
         */

        if (vm_map_lookup_entry(map, start, &temp_entry))
                return (KERN_NO_SPACE);

        prev_entry = temp_entry;

        /*
         * Assert that the next entry doesn't overlap the end point.
         */

        if ((prev_entry->next != &map->header) &&
            (prev_entry->next->start < end))
                return (KERN_NO_SPACE);

        protoeflags = 0;

        if (cow & MAP_COPY_ON_WRITE)
                protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY;

        if (cow & MAP_NOFAULT) {
                protoeflags |= MAP_ENTRY_NOFAULT;

                KASSERT(object == NULL,
                        ("vm_map_insert: paradoxical MAP_NOFAULT request"));
        }
        if (cow & MAP_DISABLE_SYNCER)
                protoeflags |= MAP_ENTRY_NOSYNC;
        if (cow & MAP_DISABLE_COREDUMP)
                protoeflags |= MAP_ENTRY_NOCOREDUMP;

        if (object) {
                /*
                 * When object is non-NULL, it could be shared with another
                 * process.  We have to set or clear OBJ_ONEMAPPING
                 * appropriately.
                 */
                if ((object->ref_count > 1) || (object->shadow_count != 0)) {
                        vm_object_clear_flag(object, OBJ_ONEMAPPING);
                }
        }
        else if ((prev_entry != &map->header) &&
                 (prev_entry->eflags == protoeflags) &&
                 (prev_entry->end == start) &&
                 (prev_entry->wired_count == 0) &&
                 ((prev_entry->object.vm_object == NULL) ||
                  vm_object_coalesce(prev_entry->object.vm_object,
                                     OFF_TO_IDX(prev_entry->offset),
                                     (vm_size_t)(prev_entry->end - prev_entry->start),
                                     (vm_size_t)(end - prev_entry->end)))) {
                /*
                 * We were able to extend the object.  Determine if we
                 * can extend the previous map entry to include the
                 * new range as well.
                 */
                if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
                    (prev_entry->protection == prot) &&
                    (prev_entry->max_protection == max)) {
                        map->size += (end - prev_entry->end);
                        prev_entry->end = end;
                        vm_map_simplify_entry(map, prev_entry);
                        return (KERN_SUCCESS);
                }

                /*
                 * If we can extend the object but cannot extend the
                 * map entry, we have to create a new map entry.  We
                 * must bump the ref count on the extended object to
                 * account for it.  object may be NULL.
                 */
                object = prev_entry->object.vm_object;
                offset = prev_entry->offset +
                        (prev_entry->end - prev_entry->start);
                vm_object_reference(object);
        }

        /*
         * NOTE: if conditionals fail, object can be NULL here.  This occurs
         * in things like the buffer map where we manage kva but do not manage
         * backing objects.
         */

        /*
         * Create a new entry
         */

        new_entry = vm_map_entry_create(map);
        new_entry->start = start;
        new_entry->end = end;

        new_entry->eflags = protoeflags;
        new_entry->object.vm_object = object;
        new_entry->offset = offset;
        new_entry->avail_ssize = 0;

        new_entry->inheritance = VM_INHERIT_DEFAULT;
        new_entry->protection = prot;
        new_entry->max_protection = max;
        new_entry->wired_count = 0;

        /*
         * Insert the new entry into the list
         */

        vm_map_entry_link(map, prev_entry, new_entry);
        map->size += new_entry->end - new_entry->start;

        /*
         * Update the free space hint
         */
        if ((map->first_free == prev_entry) &&
            (prev_entry->end >= new_entry->start)) {
                map->first_free = new_entry;
        }

#if 0
        /*
         * Temporarily removed to avoid MAP_STACK panic, due to
         * MAP_STACK being a huge hack.  Will be added back in
         * when MAP_STACK (and the user stack mapping) is fixed.
         */
        /*
         * It may be possible to simplify the entry
         */
        vm_map_simplify_entry(map, new_entry);
#endif

        if (cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) {
                pmap_object_init_pt(map->pmap, start,
                                    object, OFF_TO_IDX(offset), end - start,
                                    cow & MAP_PREFAULT_PARTIAL);
        }

        return (KERN_SUCCESS);
}

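/*
 * Illustrative call sequence (not compiled; "map", "object", "start"
 * and "size" are hypothetical): as noted above, the caller bumps the
 * object's reference count before insertion, and on failure must drop
 * that reference again.
 */
#if 0
        int rv;

        vm_object_reference(object);
        vm_map_lock(map);
        rv = vm_map_insert(map, object, 0, start, start + size,
            VM_PROT_ALL, VM_PROT_ALL, 0);
        vm_map_unlock(map);
        if (rv != KERN_SUCCESS)
                vm_object_deallocate(object);
#endif
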
/*
 * Find sufficient space for `length' bytes in the given map, starting at
 * `start'.  The map must be locked.  Returns 0 on success, 1 on no space.
 */
int
vm_map_findspace(map, start, length, addr)
        vm_map_t map;
        vm_offset_t start;
        vm_size_t length;
        vm_offset_t *addr;
{
        vm_map_entry_t entry, next;
        vm_offset_t end;

        if (start < map->min_offset)
                start = map->min_offset;
        if (start > map->max_offset)
                return (1);

        /*
         * Look for the first possible address; if there's already something
         * at this address, we have to start after it.
         */
        if (start == map->min_offset) {
                if ((entry = map->first_free) != &map->header)
                        start = entry->end;
        } else {
                vm_map_entry_t tmp;

                if (vm_map_lookup_entry(map, start, &tmp))
                        start = tmp->end;
                entry = tmp;
        }

        /*
         * Look through the rest of the map, trying to fit a new region in the
         * gap between existing regions, or after the very last region.
         */
        for (;; start = (entry = next)->end) {
                /*
                 * Find the end of the proposed new region.  Be sure we didn't
                 * go beyond the end of the map, or wrap around the address;
                 * if so, we lose.  Otherwise, if this is the last entry, or
                 * if the proposed new region fits before the next entry, we
                 * win.
                 */
                end = start + length;
                if (end > map->max_offset || end < start)
                        return (1);
                next = entry->next;
                if (next == &map->header || next->start >= end)
                        break;
        }
        SAVE_HINT(map, entry);
        *addr = start;
        if (map == kernel_map) {
                vm_offset_t ksize;
                if ((ksize = round_page(start + length)) > kernel_vm_end) {
                        pmap_growkernel(ksize);
                }
        }
        return (0);
}

/*
 *      vm_map_find finds an unallocated region in the target address
 *      map with the given length.  The search is defined to be
 *      first-fit from the specified address; the region found is
 *      returned in the same parameter.
 *
 *      If object is non-NULL, ref count must be bumped by caller
 *      prior to making call to account for the new entry.
 */
int
vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
            vm_offset_t *addr,  /* IN/OUT */
            vm_size_t length, boolean_t find_space, vm_prot_t prot,
            vm_prot_t max, int cow)
{
        vm_offset_t start;
        int result, s = 0;

        start = *addr;

        if (map == kmem_map || map == mb_map)
                s = splvm();

        vm_map_lock(map);
        if (find_space) {
                if (vm_map_findspace(map, start, length, addr)) {
                        vm_map_unlock(map);
                        if (map == kmem_map || map == mb_map)
                                splx(s);
                        return (KERN_NO_SPACE);
                }
                start = *addr;
        }
        result = vm_map_insert(map, object, offset,
                start, start + length, prot, max, cow);
        vm_map_unlock(map);

        if (map == kmem_map || map == mb_map)
                splx(s);

        return (result);
}

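/*
 * Illustrative use (not compiled; "map" is hypothetical): with
 * find_space = TRUE, "addr" is an in/out parameter seeded with the
 * lowest acceptable address and returned as the start of the
 * first-fit region.
 */
#if 0
        vm_offset_t addr = vm_map_min(map);
        int rv;

        rv = vm_map_find(map, NULL, 0, &addr, PAGE_SIZE, TRUE,
            VM_PROT_ALL, VM_PROT_ALL, 0);
#endif
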
/*
 *      vm_map_simplify_entry:
 *
 *      Simplify the given map entry by merging with either neighbor.  This
 *      routine also has the ability to merge with both neighbors.
 *
 *      The map must be locked.
 *
 *      This routine guarantees that the passed entry remains valid (though
 *      possibly extended).  When merging, this routine may delete one or
 *      both neighbors.  No action is taken on entries which have their
 *      in-transition flag set.
 */
void
vm_map_simplify_entry(map, entry)
        vm_map_t map;
        vm_map_entry_t entry;
{
        vm_map_entry_t next, prev;
        vm_size_t prevsize, esize;

        if (entry->eflags & (MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP)) {
                ++cnt.v_intrans_coll;
                return;
        }

        prev = entry->prev;
        if (prev != &map->header) {
                prevsize = prev->end - prev->start;
                if ( (prev->end == entry->start) &&
                     (prev->object.vm_object == entry->object.vm_object) &&
                     (!prev->object.vm_object ||
                        (prev->offset + prevsize == entry->offset)) &&
                     (prev->eflags == entry->eflags) &&
                     (prev->protection == entry->protection) &&
                     (prev->max_protection == entry->max_protection) &&
                     (prev->inheritance == entry->inheritance) &&
                     (prev->wired_count == entry->wired_count)) {
                        if (map->first_free == prev)
                                map->first_free = entry;
                        if (map->hint == prev)
                                map->hint = entry;
                        vm_map_entry_unlink(map, prev);
                        entry->start = prev->start;
                        entry->offset = prev->offset;
                        if (prev->object.vm_object)
                                vm_object_deallocate(prev->object.vm_object);
                        vm_map_entry_dispose(map, prev);
                }
        }

        next = entry->next;
        if (next != &map->header) {
                esize = entry->end - entry->start;
                if ((entry->end == next->start) &&
                    (next->object.vm_object == entry->object.vm_object) &&
                     (!entry->object.vm_object ||
                        (entry->offset + esize == next->offset)) &&
                    (next->eflags == entry->eflags) &&
                    (next->protection == entry->protection) &&
                    (next->max_protection == entry->max_protection) &&
                    (next->inheritance == entry->inheritance) &&
                    (next->wired_count == entry->wired_count)) {
                        if (map->first_free == next)
                                map->first_free = entry;
                        if (map->hint == next)
                                map->hint = entry;
                        vm_map_entry_unlink(map, next);
                        entry->end = next->end;
                        if (next->object.vm_object)
                                vm_object_deallocate(next->object.vm_object);
                        vm_map_entry_dispose(map, next);
                }
        }
}

/*
 *      vm_map_clip_start:      [ internal use only ]
 *
 *      Asserts that the given entry begins at or after
 *      the specified address; if necessary,
 *      it splits the entry into two.
 */
#define vm_map_clip_start(map, entry, startaddr) \
{ \
        if (startaddr > entry->start) \
                _vm_map_clip_start(map, entry, startaddr); \
}

/*
 *      This routine is called only when it is known that
 *      the entry must be split.
 */
static void
_vm_map_clip_start(map, entry, start)
        vm_map_t map;
        vm_map_entry_t entry;
        vm_offset_t start;
{
        vm_map_entry_t new_entry;

        /*
         * Split off the front portion -- note that we must insert the new
         * entry BEFORE this one, so that this entry has the specified
         * starting address.
         */

        vm_map_simplify_entry(map, entry);

        /*
         * If there is no object backing this entry, we might as well create
         * one now.  If we defer it, an object can get created after the map
         * is clipped, and individual objects will be created for the split-up
         * map.  This is a bit of a hack, but is also about the best place to
         * put this improvement.
         */

        if (entry->object.vm_object == NULL && !map->system_map) {
                vm_object_t object;
                object = vm_object_allocate(OBJT_DEFAULT,
                                atop(entry->end - entry->start));
                entry->object.vm_object = object;
                entry->offset = 0;
        }

        new_entry = vm_map_entry_create(map);
        *new_entry = *entry;

        new_entry->end = start;
        entry->offset += (start - entry->start);
        entry->start = start;

        vm_map_entry_link(map, entry->prev, new_entry);

        if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
                vm_object_reference(new_entry->object.vm_object);
        }
}

/*
 *      vm_map_clip_end:        [ internal use only ]
 *
 *      Asserts that the given entry ends at or before
 *      the specified address; if necessary,
 *      it splits the entry into two.
 */

#define vm_map_clip_end(map, entry, endaddr) \
{ \
        if (endaddr < entry->end) \
                _vm_map_clip_end(map, entry, endaddr); \
}

/*
 *      This routine is called only when it is known that
 *      the entry must be split.
 */
static void
_vm_map_clip_end(map, entry, end)
        vm_map_t map;
        vm_map_entry_t entry;
        vm_offset_t end;
{
        vm_map_entry_t new_entry;

        /*
         * If there is no object backing this entry, we might as well create
         * one now.  If we defer it, an object can get created after the map
         * is clipped, and individual objects will be created for the split-up
         * map.  This is a bit of a hack, but is also about the best place to
         * put this improvement.
         */

        if (entry->object.vm_object == NULL && !map->system_map) {
                vm_object_t object;
                object = vm_object_allocate(OBJT_DEFAULT,
                                atop(entry->end - entry->start));
                entry->object.vm_object = object;
                entry->offset = 0;
        }

        /*
         * Create a new entry and insert it AFTER the specified entry
         */

        new_entry = vm_map_entry_create(map);
        *new_entry = *entry;

        new_entry->start = entry->end = end;
        new_entry->offset += (end - entry->start);

        vm_map_entry_link(map, entry, new_entry);

        if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
                vm_object_reference(new_entry->object.vm_object);
        }
}

/*
 *      VM_MAP_RANGE_CHECK:     [ internal use only ]
 *
 *      Asserts that the starting and ending region
 *      addresses fall within the valid range of the map.
 */
#define VM_MAP_RANGE_CHECK(map, start, end)             \
                {                                       \
                if (start < vm_map_min(map))            \
                        start = vm_map_min(map);        \
                if (end > vm_map_max(map))              \
                        end = vm_map_max(map);          \
                if (start > end)                        \
                        start = end;                    \
                }

/*
 *      vm_map_transition_wait: [ kernel use only ]
 *
 *      Used to block when an in-transition collision occurs.  The map
 *      is unlocked for the sleep and relocked before the return.
 */
static
void
vm_map_transition_wait(vm_map_t map)
{
        vm_map_unlock(map);
        tsleep(map, PVM, "vment", 0);
        vm_map_lock(map);
}

/*
 * CLIP_CHECK_BACK
 * CLIP_CHECK_FWD
 *
 *      When we do blocking operations with the map lock held it is
 *      possible that a clip might have occurred on our in-transit entry,
 *      requiring an adjustment to the entry in our loop.  These macros
 *      help the pageable and clip_range code deal with the case.  The
 *      conditional costs virtually nothing if no clipping has occurred.
 */

#define CLIP_CHECK_BACK(entry, save_start)              \
    do {                                                \
            while (entry->start != save_start) {        \
                    entry = entry->prev;                \
                    KASSERT(entry != &map->header, ("bad entry clip")); \
            }                                           \
    } while(0)

#define CLIP_CHECK_FWD(entry, save_end)                 \
    do {                                                \
            while (entry->end != save_end) {            \
                    entry = entry->next;                \
                    KASSERT(entry != &map->header, ("bad entry clip")); \
            }                                           \
    } while(0)

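/*
 * Illustrative pattern (not compiled), mirroring the use in
 * vm_map_clip_range() below: save the bounds before blocking, then
 * re-locate the possibly-clipped entries afterwards.
 */
#if 0
        vm_offset_t save_end = entry->end;

        vm_map_transition_wait(map);            /* sleeps; clips may occur */
        CLIP_CHECK_FWD(entry, save_end);        /* walk forward over clips */
        CLIP_CHECK_BACK(start_entry, start);    /* re-locate the base entry */
#endif
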
/*
 *      vm_map_clip_range:      [ kernel use only ]
 *
 *      Clip the specified range and return the base entry.  The
 *      range may cover several entries starting at the returned base
 *      and the first and last entry in the covering sequence will be
 *      properly clipped to the requested start and end address.
 *
 *      If no holes are allowed you should pass the MAP_CLIP_NO_HOLES
 *      flag.
 *
 *      The MAP_ENTRY_IN_TRANSITION flag will be set for the entries
 *      covered by the requested range.
 *
 *      The map must be exclusively locked on entry and will remain locked
 *      on return. If no range exists or the range contains holes and you
 *      specified that no holes were allowed, NULL will be returned.  This
 *      routine may temporarily unlock the map in order to avoid a deadlock
 *      when sleeping.
 */
static
vm_map_entry_t
vm_map_clip_range(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags)
{
        vm_map_entry_t start_entry;
        vm_map_entry_t entry;

        /*
         * Locate the entry and effect initial clipping.  The in-transition
         * case does not occur very often so do not try to optimize it.
         */
again:
        if (vm_map_lookup_entry(map, start, &start_entry) == FALSE)
                return (NULL);
        entry = start_entry;
        if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
                entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
                ++cnt.v_intrans_coll;
                ++cnt.v_intrans_wait;
                vm_map_transition_wait(map);
                /*
                 * entry and/or start_entry may have been clipped while
                 * we slept, or may have gone away entirely.  We have
                 * to restart from the lookup.
                 */
                goto again;
        }
        /*
         * Since we hold an exclusive map lock we do not have to restart
         * after clipping, even though clipping may block in zalloc.
         */
        vm_map_clip_start(map, entry, start);
        vm_map_clip_end(map, entry, end);
        entry->eflags |= MAP_ENTRY_IN_TRANSITION;

        /*
         * Scan entries covered by the range.  When working on the next
         * entry a restart need only re-loop on the current entry which
         * we have already locked, since 'next' may have changed.  Also,
         * even though entry is safe, it may have been clipped so we
         * have to iterate forwards through the clip after sleeping.
         */
        while (entry->next != &map->header && entry->next->start < end) {
                vm_map_entry_t next = entry->next;

                if (flags & MAP_CLIP_NO_HOLES) {
                        if (next->start > entry->end) {
                                vm_map_unclip_range(map, start_entry,
                                        start, entry->end, flags);
                                return(NULL);
                        }
                }

                if (next->eflags & MAP_ENTRY_IN_TRANSITION) {
                        vm_offset_t save_end = entry->end;
                        next->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
                        ++cnt.v_intrans_coll;
                        ++cnt.v_intrans_wait;
                        vm_map_transition_wait(map);

                        /*
                         * clips might have occurred while we blocked.
                         */
                        CLIP_CHECK_FWD(entry, save_end);
                        CLIP_CHECK_BACK(start_entry, start);
                        continue;
                }
                /*
                 * No restart necessary even though clip_end may block, we
                 * are holding the map lock.
                 */
                vm_map_clip_end(map, next, end);
                next->eflags |= MAP_ENTRY_IN_TRANSITION;
                entry = next;
        }
        if (flags & MAP_CLIP_NO_HOLES) {
                if (entry->end != end) {
                        vm_map_unclip_range(map, start_entry,
                                start, entry->end, flags);
                        return(NULL);
                }
        }
        return(start_entry);
}

/*
 *      vm_map_unclip_range:    [ kernel use only ]
 *
 *      Undo the effect of vm_map_clip_range().  You should pass the same
 *      flags and the same range that you passed to vm_map_clip_range().
 *      This code will clear the in-transition flag on the entries and
 *      wake up anyone waiting.  This code will also simplify the sequence
 *      and attempt to merge it with entries before and after the sequence.
 *
 *      The map must be locked on entry and will remain locked on return.
 *
 *      Note that you should also pass the start_entry returned by
 *      vm_map_clip_range().  However, if you block between the two calls
 *      with the map unlocked please be aware that the start_entry may
 *      have been clipped and you may need to scan it backwards to find
 *      the entry corresponding with the original start address.  You are
 *      responsible for this, vm_map_unclip_range() expects the correct
 *      start_entry to be passed to it and will KASSERT otherwise.
 */
static
void
vm_map_unclip_range(
        vm_map_t map,
        vm_map_entry_t start_entry,
        vm_offset_t start,
        vm_offset_t end,
        int flags)
{
        vm_map_entry_t entry;

        entry = start_entry;

        KASSERT(entry->start == start, ("unclip_range: illegal base entry"));
        while (entry != &map->header && entry->start < end) {
                KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION,
                        ("in-transition flag not set during unclip on: %p", entry));
                KASSERT(entry->end <= end, ("unclip_range: tail wasn't clipped"));
                entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
                if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
                        entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
                        wakeup(map);
                }
                entry = entry->next;
        }

        /*
         * Simplification does not block so there is no restart case.
         */
        entry = start_entry;
        while (entry != &map->header && entry->start < end) {
                vm_map_simplify_entry(map, entry);
                entry = entry->next;
        }
}

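/*
 * Illustrative pairing (not compiled; "map", "start" and "end" are
 * hypothetical): a caller typically clips, operates on the
 * in-transition entries, then unclips with the same range and flags,
 * as vm_map_user_pageable() does further below.
 */
#if 0
        entry = vm_map_clip_range(map, start, end, MAP_CLIP_NO_HOLES);
        if (entry != NULL) {
                /* ... operate on the in-transition entries ... */
                vm_map_unclip_range(map, entry, start, end, MAP_CLIP_NO_HOLES);
        }
#endif
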
1174 /*
1175  *      vm_map_submap:          [ kernel use only ]
1176  *
1177  *      Mark the given range as handled by a subordinate map.
1178  *
1179  *      This range must have been created with vm_map_find,
1180  *      and no other operations may have been performed on this
1181  *      range prior to calling vm_map_submap.
1182  *
1183  *      Only a limited number of operations can be performed
1184  *      within this rage after calling vm_map_submap:
1185  *              vm_fault
1186  *      [Don't try vm_map_copy!]
1187  *
1188  *      To remove a submapping, one must first remove the
1189  *      range from the superior map, and then destroy the
1190  *      submap (if desired).  [Better yet, don't try it.]
1191  */
1192 int
1193 vm_map_submap(map, start, end, submap)
1194         vm_map_t map;
1195         vm_offset_t start;
1196         vm_offset_t end;
1197         vm_map_t submap;
1198 {
1199         vm_map_entry_t entry;
1200         int result = KERN_INVALID_ARGUMENT;
1201
1202         vm_map_lock(map);
1203
1204         VM_MAP_RANGE_CHECK(map, start, end);
1205
1206         if (vm_map_lookup_entry(map, start, &entry)) {
1207                 vm_map_clip_start(map, entry, start);
1208         } else {
1209                 entry = entry->next;
1210         }
1211
1212         vm_map_clip_end(map, entry, end);
1213
1214         if ((entry->start == start) && (entry->end == end) &&
1215             ((entry->eflags & MAP_ENTRY_COW) == 0) &&
1216             (entry->object.vm_object == NULL)) {
1217                 entry->object.sub_map = submap;
1218                 entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
1219                 result = KERN_SUCCESS;
1220         }
1221         vm_map_unlock(map);
1222
1223         return (result);
1224 }
1225
1226 /*
1227  *      vm_map_protect:
1228  *
1229  *      Sets the protection of the specified address
1230  *      region in the target map.  If "set_max" is
1231  *      specified, the maximum protection is to be set;
1232  *      otherwise, only the current protection is affected.
1233  */
1234 int
1235 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
1236                vm_prot_t new_prot, boolean_t set_max)
1237 {
1238         vm_map_entry_t current;
1239         vm_map_entry_t entry;
1240
1241         vm_map_lock(map);
1242
1243         VM_MAP_RANGE_CHECK(map, start, end);
1244
1245         if (vm_map_lookup_entry(map, start, &entry)) {
1246                 vm_map_clip_start(map, entry, start);
1247         } else {
1248                 entry = entry->next;
1249         }
1250
1251         /*
1252          * Make a first pass to check for protection violations.
1253          */
1254
1255         current = entry;
1256         while ((current != &map->header) && (current->start < end)) {
1257                 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
1258                         vm_map_unlock(map);
1259                         return (KERN_INVALID_ARGUMENT);
1260                 }
1261                 if ((new_prot & current->max_protection) != new_prot) {
1262                         vm_map_unlock(map);
1263                         return (KERN_PROTECTION_FAILURE);
1264                 }
1265                 current = current->next;
1266         }
1267
1268         /*
1269          * Go back and fix up protections. [Note that clipping is not
1270          * necessary the second time.]
1271          */
1272
1273         current = entry;
1274
1275         while ((current != &map->header) && (current->start < end)) {
1276                 vm_prot_t old_prot;
1277
1278                 vm_map_clip_end(map, current, end);
1279
1280                 old_prot = current->protection;
1281                 if (set_max)
1282                         current->protection =
1283                             (current->max_protection = new_prot) &
1284                             old_prot;
1285                 else
1286                         current->protection = new_prot;
1287
1288                 /*
1289                  * Update physical map if necessary. Worry about copy-on-write
1290                  * here -- CHECK THIS XXX
1291                  */
1292
1293                 if (current->protection != old_prot) {
1294 #define MASK(entry)     (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
1295                                                         VM_PROT_ALL)
1296
1297                         pmap_protect(map->pmap, current->start,
1298                             current->end,
1299                             current->protection & MASK(current));
1300 #undef  MASK
1301                 }
1302
1303                 vm_map_simplify_entry(map, current);
1304
1305                 current = current->next;
1306         }
1307
1308         vm_map_unlock(map);
1309         return (KERN_SUCCESS);
1310 }
1311
1312 /*
1313  *      vm_map_madvise:
1314  *
1315  *      This routine traverses a processes map handling the madvise
1316  *      system call.  Advisories are classified as either those effecting
1317  *      the vm_map_entry structure, or those effecting the underlying 
1318  *      objects.
1319  */
1320
1321 int
1322 vm_map_madvise(map, start, end, behav)
1323         vm_map_t map;
1324         vm_offset_t start, end;
1325         int behav;
1326 {
1327         vm_map_entry_t current, entry;
1328         int modify_map = 0;
1329
1330         /*
1331          * Some madvise calls directly modify the vm_map_entry, in which case
1332          * we need to use an exclusive lock on the map and we need to perform 
1333          * various clipping operations.  Otherwise we only need a read-lock
1334          * on the map.
1335          */
1336
1337         switch(behav) {
1338         case MADV_NORMAL:
1339         case MADV_SEQUENTIAL:
1340         case MADV_RANDOM:
1341         case MADV_NOSYNC:
1342         case MADV_AUTOSYNC:
1343         case MADV_NOCORE:
1344         case MADV_CORE:
1345                 modify_map = 1;
1346                 vm_map_lock(map);
1347                 break;
1348         case MADV_WILLNEED:
1349         case MADV_DONTNEED:
1350         case MADV_FREE:
1351                 vm_map_lock_read(map);
1352                 break;
1353         default:
1354                 return (KERN_INVALID_ARGUMENT);
1355         }
1356
1357         /*
1358          * Locate starting entry and clip if necessary.
1359          */
1360
1361         VM_MAP_RANGE_CHECK(map, start, end);
1362
1363         if (vm_map_lookup_entry(map, start, &entry)) {
1364                 if (modify_map)
1365                         vm_map_clip_start(map, entry, start);
1366         } else {
1367                 entry = entry->next;
1368         }
1369
1370         if (modify_map) {
1371                 /*
1372                  * madvise behaviors that are implemented in the vm_map_entry.
1373                  *
1374                  * We clip the vm_map_entry so that behavioral changes are
1375                  * limited to the specified address range.
1376                  */
1377                 for (current = entry;
1378                      (current != &map->header) && (current->start < end);
1379                      current = current->next
1380                 ) {
1381                         if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
1382                                 continue;
1383
1384                         vm_map_clip_end(map, current, end);
1385
1386                         switch (behav) {
1387                         case MADV_NORMAL:
1388                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
1389                                 break;
1390                         case MADV_SEQUENTIAL:
1391                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
1392                                 break;
1393                         case MADV_RANDOM:
1394                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
1395                                 break;
1396                         case MADV_NOSYNC:
1397                                 current->eflags |= MAP_ENTRY_NOSYNC;
1398                                 break;
1399                         case MADV_AUTOSYNC:
1400                                 current->eflags &= ~MAP_ENTRY_NOSYNC;
1401                                 break;
1402                         case MADV_NOCORE:
1403                                 current->eflags |= MAP_ENTRY_NOCOREDUMP;
1404                                 break;
1405                         case MADV_CORE:
1406                                 current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
1407                                 break;
1408                         default:
1409                                 break;
1410                         }
1411                         vm_map_simplify_entry(map, current);
1412                 }
1413                 vm_map_unlock(map);
1414         } else {
1415                 vm_pindex_t pindex;
1416                 int count;
1417
1418                 /*
1419                  * madvise behaviors that are implemented in the underlying
1420                  * vm_object.
1421                  *
1422                  * Since we don't clip the vm_map_entry, we have to clip
1423                  * the vm_object pindex and count.
1424                  */
1425                 for (current = entry;
1426                      (current != &map->header) && (current->start < end);
1427                      current = current->next
1428                 ) {
1429                         vm_offset_t useStart;
1430
1431                         if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
1432                                 continue;
1433
1434                         pindex = OFF_TO_IDX(current->offset);
1435                         count = atop(current->end - current->start);
1436                         useStart = current->start;
1437
1438                         if (current->start < start) {
1439                                 pindex += atop(start - current->start);
1440                                 count -= atop(start - current->start);
1441                                 useStart = start;
1442                         }
1443                         if (current->end > end)
1444                                 count -= atop(current->end - end);
1445
1446                         if (count <= 0)
1447                                 continue;
1448
1449                         vm_object_madvise(current->object.vm_object,
1450                                           pindex, count, behav);
1451                         if (behav == MADV_WILLNEED) {
1452                                 pmap_object_init_pt(
1453                                     map->pmap, 
1454                                     useStart,
1455                                     current->object.vm_object,
1456                                     pindex, 
1457                                     (count << PAGE_SHIFT),
1458                                     MAP_PREFAULT_MADVISE
1459                                 );
1460                         }
1461                 }
1462                 vm_map_unlock_read(map);
1463         }
1464         return(0);
1465 }       
1466
1467
1468 /*
1469  *      vm_map_inherit:
1470  *
1471  *      Sets the inheritance of the specified address
1472  *      range in the target map.  Inheritance
1473  *      affects how the map will be shared with
1474  *      child maps at the time of vm_map_fork.
1475  */
1476 int
1477 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
1478                vm_inherit_t new_inheritance)
1479 {
1480         vm_map_entry_t entry;
1481         vm_map_entry_t temp_entry;
1482
1483         switch (new_inheritance) {
1484         case VM_INHERIT_NONE:
1485         case VM_INHERIT_COPY:
1486         case VM_INHERIT_SHARE:
1487                 break;
1488         default:
1489                 return (KERN_INVALID_ARGUMENT);
1490         }
1491
1492         vm_map_lock(map);
1493
1494         VM_MAP_RANGE_CHECK(map, start, end);
1495
1496         if (vm_map_lookup_entry(map, start, &temp_entry)) {
1497                 entry = temp_entry;
1498                 vm_map_clip_start(map, entry, start);
1499         } else
1500                 entry = temp_entry->next;
1501
1502         while ((entry != &map->header) && (entry->start < end)) {
1503                 vm_map_clip_end(map, entry, end);
1504
1505                 entry->inheritance = new_inheritance;
1506
1507                 vm_map_simplify_entry(map, entry);
1508
1509                 entry = entry->next;
1510         }
1511
1512         vm_map_unlock(map);
1513         return (KERN_SUCCESS);
1514 }
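
/*
 * Editorial sketch (illustrative only): marking a range so that it
 * remains shared with children across fork:
 *
 *	(void) vm_map_inherit(&p->p_vmspace->vm_map, start, end,
 *	    VM_INHERIT_SHARE);
 *
 * vmspace_fork() below honors the per-entry inheritance value.
 */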
1515
1516 /*
1517  * Implement the semantics of mlock
1518  */
1519 int
vm_map_user_pageable(vm_map_t map, vm_offset_t start, vm_offset_t real_end,
		     boolean_t new_pageable)
{
1526         vm_map_entry_t entry;
1527         vm_map_entry_t start_entry;
1528         vm_offset_t end;
1529         int rv = KERN_SUCCESS;
1530
1531         vm_map_lock(map);
1532         VM_MAP_RANGE_CHECK(map, start, real_end);
1533         end = real_end;
1534
1535         start_entry = vm_map_clip_range(map, start, end, MAP_CLIP_NO_HOLES);
1536         if (start_entry == NULL) {
1537                 vm_map_unlock(map);
1538                 return (KERN_INVALID_ADDRESS);
1539         }
1540
1541         if (new_pageable == 0) {
1542                 entry = start_entry;
1543                 while ((entry != &map->header) && (entry->start < end)) {
1544                         vm_offset_t save_start;
1545                         vm_offset_t save_end;
1546
1547                         /*
1548                          * Already user wired or hard wired (trivial cases)
1549                          */
1550                         if (entry->eflags & MAP_ENTRY_USER_WIRED) {
1551                                 entry = entry->next;
1552                                 continue;
1553                         }
1554                         if (entry->wired_count != 0) {
1555                                 entry->wired_count++;
1556                                 entry->eflags |= MAP_ENTRY_USER_WIRED;
1557                                 entry = entry->next;
1558                                 continue;
1559                         }
1560
1561                         /*
1562                          * A new wiring requires instantiation of appropriate
1563                          * management structures and the faulting in of the
1564                          * page.
1565                          */
1566                         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1567                                 int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
1568                                 if (copyflag && ((entry->protection & VM_PROT_WRITE) != 0)) {
1569
1570                                         vm_object_shadow(&entry->object.vm_object,
1571                                             &entry->offset,
1572                                             atop(entry->end - entry->start));
1573                                         entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
1574
1575                                 } else if (entry->object.vm_object == NULL &&
1576                                            !map->system_map) {
1577
1578                                         entry->object.vm_object =
1579                                             vm_object_allocate(OBJT_DEFAULT,
1580                                                 atop(entry->end - entry->start));
1581                                         entry->offset = (vm_offset_t) 0;
1582
1583                                 }
1584                         }
1585                         entry->wired_count++;
1586                         entry->eflags |= MAP_ENTRY_USER_WIRED;
1587
1588                         /*
1589                          * Now fault in the area.  The map lock needs to be
1590                          * manipulated to avoid deadlocks.  The in-transition
1591                          * flag protects the entries. 
1592                          */
1593                         save_start = entry->start;
1594                         save_end = entry->end;
1595                         vm_map_unlock(map);
1596                         map->timestamp++;
1597                         rv = vm_fault_user_wire(map, save_start, save_end);
1598                         vm_map_lock(map);
1599                         if (rv) {
1600                                 CLIP_CHECK_BACK(entry, save_start);
1601                                 for (;;) {
1602                                         KASSERT(entry->wired_count == 1, ("bad wired_count on entry"));
1603                                         entry->eflags &= ~MAP_ENTRY_USER_WIRED;
1604                                         entry->wired_count = 0;
1605                                         if (entry->end == save_end)
1606                                                 break;
1607                                         entry = entry->next;
1608                                         KASSERT(entry != &map->header, ("bad entry clip during backout"));
1609                                 }
1610                                 end = save_start;       /* unwire the rest */
1611                                 break;
1612                         }
1613                         /*
1614                          * note that even though the entry might have been
1615                          * clipped, the USER_WIRED flag we set prevents
1616                          * duplication so we do not have to do a 
1617                          * clip check.
1618                          */
1619                         entry = entry->next;
1620                 }
1621
1622                 /*
1623                  * If we failed fall through to the unwiring section to
1624                  * unwire what we had wired so far.  'end' has already
1625                  * been adjusted.
1626                  */
1627                 if (rv)
1628                         new_pageable = 1;
1629
1630                 /*
1631                  * start_entry might have been clipped if we unlocked the
1632                  * map and blocked.  No matter how clipped it has gotten
1633                  * there should be a fragment that is on our start boundary.
1634                  */
1635                 CLIP_CHECK_BACK(start_entry, start);
1636         }
1637
1638         /*
1639          * Deal with the unwiring case.
1640          */
1641         if (new_pageable) {
1642                 /*
1643                  * This is the unwiring case.  We must first ensure that the
1644                  * range to be unwired is really wired down.  We know there
1645                  * are no holes.
1646                  */
1647                 entry = start_entry;
1648                 while ((entry != &map->header) && (entry->start < end)) {
1649                         if ((entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
1650                                 rv = KERN_INVALID_ARGUMENT;
1651                                 goto done;
1652                         }
1653                         KASSERT(entry->wired_count != 0, ("wired count was 0 with USER_WIRED set! %p", entry));
1654                         entry = entry->next;
1655                 }
1656
1657                 /*
1658                  * Now decrement the wiring count for each region. If a region
1659                  * becomes completely unwired, unwire its physical pages and
1660                  * mappings.
1661                  */
		entry = start_entry;
		while ((entry != &map->header) && (entry->start < end)) {
1663                         KASSERT(entry->eflags & MAP_ENTRY_USER_WIRED, ("expected USER_WIRED on entry %p", entry));
1664                         entry->eflags &= ~MAP_ENTRY_USER_WIRED;
1665                         entry->wired_count--;
1666                         if (entry->wired_count == 0)
1667                                 vm_fault_unwire(map, entry->start, entry->end);
1668                         entry = entry->next;
1669                 }
1670         }
1671 done:
1672         vm_map_unclip_range(map, start_entry, start, real_end, 
1673                 MAP_CLIP_NO_HOLES);
1674         map->timestamp++;
1675         vm_map_unlock(map);
1676         return (rv);
1677 }
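
/*
 * Editorial sketch (illustrative only): mlock()-style wiring of a
 * validated, page-aligned user range, followed by the matching unwire.
 * new_pageable == FALSE wires, TRUE unwires:
 *
 *	if (vm_map_user_pageable(map, addr, addr + len, FALSE) ==
 *	    KERN_SUCCESS) {
 *		... access the now-resident pages ...
 *		(void) vm_map_user_pageable(map, addr, addr + len, TRUE);
 *	}
 */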
1678
1679 /*
1680  *      vm_map_pageable:
1681  *
1682  *      Sets the pageability of the specified address
1683  *      range in the target map.  Regions specified
1684  *      as not pageable require locked-down physical
1685  *      memory and physical page maps.
1686  *
1687  *      The map must not be locked, but a reference
1688  *      must remain to the map throughout the call.
1689  */
1690 int
vm_map_pageable(vm_map_t map, vm_offset_t start, vm_offset_t real_end,
		boolean_t new_pageable)
{
1697         vm_map_entry_t entry;
1698         vm_map_entry_t start_entry;
1699         vm_offset_t end;
1700         int rv = KERN_SUCCESS;
1701         int s;
1702
1703         vm_map_lock(map);
1704         VM_MAP_RANGE_CHECK(map, start, real_end);
1705         end = real_end;
1706
1707         start_entry = vm_map_clip_range(map, start, end, MAP_CLIP_NO_HOLES);
1708         if (start_entry == NULL) {
1709                 vm_map_unlock(map);
1710                 return (KERN_INVALID_ADDRESS);
1711         }
1712         if (new_pageable == 0) {
1713                 /*
1714                  * Wiring.  
1715                  *
1716                  * 1.  Holding the write lock, we create any shadow or zero-fill
1717                  * objects that need to be created. Then we clip each map
1718                  * entry to the region to be wired and increment its wiring
1719                  * count.  We create objects before clipping the map entries
1720                  * to avoid object proliferation.
1721                  *
1722                  * 2.  We downgrade to a read lock, and call vm_fault_wire to
1723                  * fault in the pages for any newly wired area (wired_count is
1724                  * 1).
1725                  *
1726                  * Downgrading to a read lock for vm_fault_wire avoids a 
1727                  * possible deadlock with another process that may have faulted
1728                  * on one of the pages to be wired (it would mark the page busy,
1729                  * blocking us, then in turn block on the map lock that we
1730                  * hold).  Because of problems in the recursive lock package,
1731                  * we cannot upgrade to a write lock in vm_map_lookup.  Thus,
1732                  * any actions that require the write lock must be done
1733                  * beforehand.  Because we keep the read lock on the map, the
1734                  * copy-on-write status of the entries we modify here cannot
1735                  * change.
1736                  */
1737
1738                 entry = start_entry;
1739                 while ((entry != &map->header) && (entry->start < end)) {
1740                         /*
1741                          * Trivial case if the entry is already wired
1742                          */
1743                         if (entry->wired_count) {
1744                                 entry->wired_count++;
1745                                 entry = entry->next;
1746                                 continue;
1747                         }
1748
1749                         /*
1750                          * The entry is being newly wired, we have to setup
1751                          * appropriate management structures.  A shadow 
1752                          * object is required for a copy-on-write region,
1753                          * or a normal object for a zero-fill region.  We
1754                          * do not have to do this for entries that point to sub
1755                          * maps because we won't hold the lock on the sub map.
1756                          */
1757                         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1758                                 int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
1759                                 if (copyflag &&
1760                                     ((entry->protection & VM_PROT_WRITE) != 0)) {
1761
1762                                         vm_object_shadow(&entry->object.vm_object,
1763                                             &entry->offset,
1764                                             atop(entry->end - entry->start));
1765                                         entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
1766                                 } else if (entry->object.vm_object == NULL &&
1767                                            !map->system_map) {
1768                                         entry->object.vm_object =
1769                                             vm_object_allocate(OBJT_DEFAULT,
1770                                                 atop(entry->end - entry->start));
1771                                         entry->offset = (vm_offset_t) 0;
1772                                 }
1773                         }
1774
1775                         entry->wired_count++;
1776                         entry = entry->next;
1777                 }
1778
1779                 /*
1780                  * Pass 2.
1781                  */
1782
1783                 /*
1784                  * HACK HACK HACK HACK
1785                  *
1786                  * Unlock the map to avoid deadlocks.  The in-transit flag
1787                  * protects us from most changes but note that
1788                  * clipping may still occur.  To prevent clipping from
		 * occurring after the unlock, except for when we are
1790                  * blocking in vm_fault_wire, we must run at splvm().
1791                  * Otherwise our accesses to entry->start and entry->end
1792                  * could be corrupted.  We have to set splvm() prior to
1793                  * unlocking so start_entry does not change out from
1794                  * under us at the very beginning of the loop.
1795                  *
1796                  * HACK HACK HACK HACK
1797                  */
1798
1799                 s = splvm();
1800                 vm_map_unlock(map);
1801
1802                 entry = start_entry;
1803                 while (entry != &map->header && entry->start < end) {
1804                         /*
1805                          * If vm_fault_wire fails for any page we need to undo
1806                          * what has been done.  We decrement the wiring count
1807                          * for those pages which have not yet been wired (now)
1808                          * and unwire those that have (later).
1809                          */
1810                         vm_offset_t save_start = entry->start;
1811                         vm_offset_t save_end = entry->end;
1812
1813                         if (entry->wired_count == 1)
1814                                 rv = vm_fault_wire(map, entry->start, entry->end);
1815                         if (rv) {
1816                                 CLIP_CHECK_BACK(entry, save_start);
1817                                 for (;;) {
1818                                         KASSERT(entry->wired_count == 1, ("wired_count changed unexpectedly"));
1819                                         entry->wired_count = 0;
1820                                         if (entry->end == save_end)
1821                                                 break;
1822                                         entry = entry->next;
1823                                         KASSERT(entry != &map->header, ("bad entry clip during backout"));
1824                                 }
1825                                 end = save_start;
1826                                 break;
1827                         }
1828                         CLIP_CHECK_FWD(entry, save_end);
1829                         entry = entry->next;
1830                 }
1831                 splx(s);
1832
1833                 /*
1834                  * relock.  start_entry is still IN_TRANSITION and must
1835                  * still exist, but may have been clipped (handled just
1836                  * below).
1837                  */
1838                 vm_map_lock(map);
1839
1840                 /*
		 * If a failure occurred, undo everything by falling through
1842                  * to the unwiring code.  'end' has already been adjusted
1843                  * appropriately.
1844                  */
1845                 if (rv)
1846                         new_pageable = 1;
1847
1848                 /*
1849                  * start_entry might have been clipped if we unlocked the
1850                  * map and blocked.  No matter how clipped it has gotten
1851                  * there should be a fragment that is on our start boundary.
1852                  */
1853                 CLIP_CHECK_BACK(start_entry, start);
1854         }
1855
1856         if (new_pageable) {
1857                 /*
1858                  * This is the unwiring case.  We must first ensure that the
1859                  * range to be unwired is really wired down.  We know there
1860                  * are no holes.
1861                  */
1862                 entry = start_entry;
1863                 while ((entry != &map->header) && (entry->start < end)) {
1864                         if (entry->wired_count == 0) {
1865                                 rv = KERN_INVALID_ARGUMENT;
1866                                 goto done;
1867                         }
1868                         entry = entry->next;
1869                 }
1870
1871                 /*
1872                  * Now decrement the wiring count for each region. If a region
1873                  * becomes completely unwired, unwire its physical pages and
1874                  * mappings.
1875                  */
1876                 entry = start_entry;
1877                 while ((entry != &map->header) && (entry->start < end)) {
1878                         entry->wired_count--;
1879                         if (entry->wired_count == 0)
1880                                 vm_fault_unwire(map, entry->start, entry->end);
1881                         entry = entry->next;
1882                 }
1883         }
1884 done:
1885         vm_map_unclip_range(map, start_entry, start, real_end, 
1886                 MAP_CLIP_NO_HOLES);
1887         map->timestamp++;
1888         vm_map_unlock(map);
1889         return (rv);
1890 }
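
/*
 * Editorial sketch (illustrative only): kernel-level wiring of a range,
 * e.g. around an operation that cannot tolerate faults.  Wire requests
 * stack via wired_count, so each successful wire must be matched by an
 * unwire:
 *
 *	rv = vm_map_pageable(map, trunc_page(va),
 *	    round_page(va + size), FALSE);
 *	...
 *	(void) vm_map_pageable(map, trunc_page(va),
 *	    round_page(va + size), TRUE);
 */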
1891
1892 /*
1893  * vm_map_clean
1894  *
1895  * Push any dirty cached pages in the address range to their pager.
1896  * If syncio is TRUE, dirty pages are written synchronously.
1897  * If invalidate is TRUE, any cached pages are freed as well.
1898  *
1899  * Returns an error if any part of the specified range is not mapped.
1900  */
1901 int
vm_map_clean(vm_map_t map, vm_offset_t start, vm_offset_t end,
	     boolean_t syncio, boolean_t invalidate)
{
1909         vm_map_entry_t current;
1910         vm_map_entry_t entry;
1911         vm_size_t size;
1912         vm_object_t object;
1913         vm_ooffset_t offset;
1914
1915         vm_map_lock_read(map);
1916         VM_MAP_RANGE_CHECK(map, start, end);
1917         if (!vm_map_lookup_entry(map, start, &entry)) {
1918                 vm_map_unlock_read(map);
1919                 return (KERN_INVALID_ADDRESS);
1920         }
1921         /*
1922          * Make a first pass to check for holes.
1923          */
1924         for (current = entry; current->start < end; current = current->next) {
1925                 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
1926                         vm_map_unlock_read(map);
1927                         return (KERN_INVALID_ARGUMENT);
1928                 }
1929                 if (end > current->end &&
1930                     (current->next == &map->header ||
1931                         current->end != current->next->start)) {
1932                         vm_map_unlock_read(map);
1933                         return (KERN_INVALID_ADDRESS);
1934                 }
1935         }
1936
1937         if (invalidate)
1938                 pmap_remove(vm_map_pmap(map), start, end);
1939         /*
1940          * Make a second pass, cleaning/uncaching pages from the indicated
1941          * objects as we go.
1942          */
1943         for (current = entry; current->start < end; current = current->next) {
1944                 offset = current->offset + (start - current->start);
1945                 size = (end <= current->end ? end : current->end) - start;
1946                 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
1947                         vm_map_t smap;
1948                         vm_map_entry_t tentry;
1949                         vm_size_t tsize;
1950
1951                         smap = current->object.sub_map;
1952                         vm_map_lock_read(smap);
1953                         (void) vm_map_lookup_entry(smap, offset, &tentry);
1954                         tsize = tentry->end - offset;
1955                         if (tsize < size)
1956                                 size = tsize;
1957                         object = tentry->object.vm_object;
1958                         offset = tentry->offset + (offset - tentry->start);
1959                         vm_map_unlock_read(smap);
1960                 } else {
1961                         object = current->object.vm_object;
1962                 }
1963                 /*
1964                  * Note that there is absolutely no sense in writing out
1965                  * anonymous objects, so we track down the vnode object
1966                  * to write out.
1967                  * We invalidate (remove) all pages from the address space
1968                  * anyway, for semantic correctness.
1969                  *
1970                  * note: certain anonymous maps, such as MAP_NOSYNC maps,
1971                  * may start out with a NULL object.
1972                  */
		while (object && object->backing_object) {
			offset += object->backing_object_offset;
			object = object->backing_object;
			if (object->size < OFF_TO_IDX(offset + size))
				size = IDX_TO_OFF(object->size) - offset;
		}
1979                 if (object && (object->type == OBJT_VNODE) && 
1980                     (current->protection & VM_PROT_WRITE)) {
1981                         /*
1982                          * Flush pages if writing is allowed, invalidate them
1983                          * if invalidation requested.  Pages undergoing I/O
1984                          * will be ignored by vm_object_page_remove().
1985                          *
1986                          * We cannot lock the vnode and then wait for paging
1987                          * to complete without deadlocking against vm_fault.
1988                          * Instead we simply call vm_object_page_remove() and
1989                          * allow it to block internally on a page-by-page 
1990                          * basis when it encounters pages undergoing async 
1991                          * I/O.
1992                          */
1993                         int flags;
1994
1995                         vm_object_reference(object);
1996                         vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY, curproc);
1997                         flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
1998                         flags |= invalidate ? OBJPC_INVAL : 0;
1999                         vm_object_page_clean(object,
2000                             OFF_TO_IDX(offset),
2001                             OFF_TO_IDX(offset + size + PAGE_MASK),
2002                             flags);
2003                         VOP_UNLOCK(object->handle, 0, curproc);
2004                         vm_object_deallocate(object);
2005                 }
2006                 if (object && invalidate &&
2007                    ((object->type == OBJT_VNODE) ||
2008                     (object->type == OBJT_DEVICE))) {
2009                         vm_object_reference(object);
2010                         vm_object_page_remove(object,
2011                             OFF_TO_IDX(offset),
2012                             OFF_TO_IDX(offset + size + PAGE_MASK),
2013                             FALSE);
2014                         vm_object_deallocate(object);
2015                 }
2016                 start += size;
2017         }
2018
2019         vm_map_unlock_read(map);
2020         return (KERN_SUCCESS);
2021 }
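
/*
 * Editorial sketch (illustrative only): one plausible mapping of
 * msync() flags onto the two booleans taken above:
 *
 *	syncio     = (flags & MS_ASYNC) == 0;
 *	invalidate = (flags & MS_INVALIDATE) != 0;
 *	rv = vm_map_clean(map, addr, addr + size, syncio, invalidate);
 */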
2022
2023 /*
2024  *      vm_map_entry_unwire:    [ internal use only ]
2025  *
2026  *      Make the region specified by this entry pageable.
2027  *
2028  *      The map in question should be locked.
2029  *      [This is the reason for this routine's existence.]
2030  */
static void
vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
{
2036         vm_fault_unwire(map, entry->start, entry->end);
2037         entry->wired_count = 0;
2038 }
2039
2040 /*
2041  *      vm_map_entry_delete:    [ internal use only ]
2042  *
2043  *      Deallocate the given entry from the target map.
2044  */
2045 static void
vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
{
2050         vm_map_entry_unlink(map, entry);
2051         map->size -= entry->end - entry->start;
2052
2053         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
2054                 vm_object_deallocate(entry->object.vm_object);
2055         }
2056
2057         vm_map_entry_dispose(map, entry);
2058 }
2059
2060 /*
2061  *      vm_map_delete:  [ internal use only ]
2062  *
2063  *      Deallocates the given address range from the target
2064  *      map.
2065  */
2066 int
vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
2072         vm_object_t object;
2073         vm_map_entry_t entry;
2074         vm_map_entry_t first_entry;
2075
2076         /*
2077          * Find the start of the region, and clip it
2078          */
2079
2080 again:
2081         if (!vm_map_lookup_entry(map, start, &first_entry))
2082                 entry = first_entry->next;
2083         else {
2084                 entry = first_entry;
2085                 vm_map_clip_start(map, entry, start);
2086                 /*
		 * Fix the lookup hint now, rather than each time through the
2088                  * loop.
2089                  */
2090                 SAVE_HINT(map, entry->prev);
2091         }
2092
2093         /*
2094          * Save the free space hint
2095          */
2096
2097         if (entry == &map->header) {
2098                 map->first_free = &map->header;
2099         } else if (map->first_free->start >= start) {
2100                 map->first_free = entry->prev;
2101         }
2102
2103         /*
2104          * Step through all entries in this region
2105          */
2106
2107         while ((entry != &map->header) && (entry->start < end)) {
2108                 vm_map_entry_t next;
2109                 vm_offset_t s, e;
2110                 vm_pindex_t offidxstart, offidxend, count;
2111
2112                 /*
2113                  * If we hit an in-transition entry we have to sleep and
2114                  * retry.  It's easier (and not really slower) to just retry
2115                  * since this case occurs so rarely and the hint is already
2116                  * pointing at the right place.  We have to reset the
		 * start offset so as not to accidentally delete an entry
2118                  * another process just created in vacated space.
2119                  */
2120                 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
2121                         entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2122                         start = entry->start;
2123                         ++cnt.v_intrans_coll;
2124                         ++cnt.v_intrans_wait;
2125                         vm_map_transition_wait(map);
2126                         goto again;
2127                 }
2128                 vm_map_clip_end(map, entry, end);
2129
2130                 s = entry->start;
2131                 e = entry->end;
2132                 next = entry->next;
2133
2134                 offidxstart = OFF_TO_IDX(entry->offset);
2135                 count = OFF_TO_IDX(e - s);
2136                 object = entry->object.vm_object;
2137
2138                 /*
2139                  * Unwire before removing addresses from the pmap; otherwise,
2140                  * unwiring will put the entries back in the pmap.
2141                  */
2142                 if (entry->wired_count != 0) {
2143                         vm_map_entry_unwire(map, entry);
2144                 }
2145
2146                 offidxend = offidxstart + count;
2147
2148                 if ((object == kernel_object) || (object == kmem_object)) {
2149                         vm_object_page_remove(object, offidxstart, offidxend, FALSE);
2150                 } else {
2151                         pmap_remove(map->pmap, s, e);
2152                         if (object != NULL &&
2153                             object->ref_count != 1 &&
2154                             (object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING &&
2155                             (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
2156                                 vm_object_collapse(object);
2157                                 vm_object_page_remove(object, offidxstart, offidxend, FALSE);
2158                                 if (object->type == OBJT_SWAP) {
2159                                         swap_pager_freespace(object, offidxstart, count);
2160                                 }
2161                                 if (offidxend >= object->size &&
2162                                     offidxstart < object->size) {
2163                                         object->size = offidxstart;
2164                                 }
2165                         }
2166                 }
2167
2168                 /*
2169                  * Delete the entry (which may delete the object) only after
2170                  * removing all pmap entries pointing to its pages.
2171                  * (Otherwise, its page frames may be reallocated, and any
2172                  * modify bits will be set in the wrong object!)
2173                  */
2174                 vm_map_entry_delete(map, entry);
2175                 entry = next;
2176         }
2177         return (KERN_SUCCESS);
2178 }
2179
2180 /*
2181  *      vm_map_remove:
2182  *
2183  *      Remove the given address range from the target map.
2184  *      This is the exported form of vm_map_delete.
2185  */
2186 int
vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
2192         int result, s = 0;
2193
2194         if (map == kmem_map || map == mb_map)
2195                 s = splvm();
2196
2197         vm_map_lock(map);
2198         VM_MAP_RANGE_CHECK(map, start, end);
2199         result = vm_map_delete(map, start, end);
2200         vm_map_unlock(map);
2201
2202         if (map == kmem_map || map == mb_map)
2203                 splx(s);
2204
2205         return (result);
2206 }
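
/*
 * Editorial sketch (illustrative only): munmap()-style removal of a
 * page-aligned range from the current process:
 *
 *	(void) vm_map_remove(&p->p_vmspace->vm_map, addr,
 *	    addr + round_page(len));
 */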
2207
2208 /*
2209  *      vm_map_check_protection:
2210  *
2211  *      Assert that the target map allows the specified
2212  *      privilege on the entire address region given.
2213  *      The entire region must be allocated.
2214  */
2215 boolean_t
2216 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
2217                         vm_prot_t protection)
2218 {
2219         vm_map_entry_t entry;
2220         vm_map_entry_t tmp_entry;
2221
2222         if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
2223                 return (FALSE);
2224         }
2225         entry = tmp_entry;
2226
2227         while (start < end) {
2228                 if (entry == &map->header) {
2229                         return (FALSE);
2230                 }
2231                 /*
2232                  * No holes allowed!
2233                  */
2234
2235                 if (start < entry->start) {
2236                         return (FALSE);
2237                 }
2238                 /*
2239                  * Check protection associated with entry.
2240                  */
2241
2242                 if ((entry->protection & protection) != protection) {
2243                         return (FALSE);
2244                 }
2245                 /* go to next entry */
2246
2247                 start = entry->end;
2248                 entry = entry->next;
2249         }
2250         return (TRUE);
2251 }
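
/*
 * Editorial sketch (illustrative only): verifying read access before
 * copying from a user range.  The caller is responsible for holding a
 * reference (and, as appropriate, a lock) on the map:
 *
 *	if (!vm_map_check_protection(map, start, end, VM_PROT_READ))
 *		return (KERN_PROTECTION_FAILURE);
 */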
2252
2253 /*
2254  * Split the pages in a map entry into a new object.  This affords
2255  * easier removal of unused pages, and keeps object inheritance from
2256  * being a negative impact on memory usage.
2257  */
2258 static void
vm_map_split(vm_map_entry_t entry)
{
2262         vm_page_t m;
2263         vm_object_t orig_object, new_object, source;
2264         vm_offset_t s, e;
2265         vm_pindex_t offidxstart, offidxend, idx;
2266         vm_size_t size;
2267         vm_ooffset_t offset;
2268
2269         orig_object = entry->object.vm_object;
2270         if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP)
2271                 return;
2272         if (orig_object->ref_count <= 1)
2273                 return;
2274
2275         offset = entry->offset;
2276         s = entry->start;
2277         e = entry->end;
2278
2279         offidxstart = OFF_TO_IDX(offset);
2280         offidxend = offidxstart + OFF_TO_IDX(e - s);
2281         size = offidxend - offidxstart;
2282
2283         new_object = vm_pager_allocate(orig_object->type,
2284                 NULL, IDX_TO_OFF(size), VM_PROT_ALL, 0LL);
2285         if (new_object == NULL)
2286                 return;
2287
2288         source = orig_object->backing_object;
2289         if (source != NULL) {
2290                 vm_object_reference(source);    /* Referenced by new_object */
2291                 LIST_INSERT_HEAD(&source->shadow_head,
2292                                   new_object, shadow_list);
2293                 vm_object_clear_flag(source, OBJ_ONEMAPPING);
2294                 new_object->backing_object_offset = 
2295                         orig_object->backing_object_offset + IDX_TO_OFF(offidxstart);
2296                 new_object->backing_object = source;
2297                 source->shadow_count++;
2298                 source->generation++;
2299         }
2300
2301         for (idx = 0; idx < size; idx++) {
2304         retry:
2305                 m = vm_page_lookup(orig_object, offidxstart + idx);
2306                 if (m == NULL)
2307                         continue;
2308
2309                 /*
2310                  * We must wait for pending I/O to complete before we can
2311                  * rename the page.
2312                  *
2313                  * We do not have to VM_PROT_NONE the page as mappings should
2314                  * not be changed by this operation.
2315                  */
2316                 if (vm_page_sleep_busy(m, TRUE, "spltwt"))
2317                         goto retry;
2318                         
2319                 vm_page_busy(m);
2320                 vm_page_rename(m, new_object, idx);
2321                 /* page automatically made dirty by rename and cache handled */
2322                 vm_page_busy(m);
2323         }
2324
2325         if (orig_object->type == OBJT_SWAP) {
2326                 vm_object_pip_add(orig_object, 1);
2327                 /*
2328                  * copy orig_object pages into new_object
2329                  * and destroy unneeded pages in
2330                  * shadow object.
2331                  */
2332                 swap_pager_copy(orig_object, new_object, offidxstart, 0);
2333                 vm_object_pip_wakeup(orig_object);
2334         }
2335
2336         for (idx = 0; idx < size; idx++) {
2337                 m = vm_page_lookup(new_object, idx);
2338                 if (m) {
2339                         vm_page_wakeup(m);
2340                 }
2341         }
2342
2343         entry->object.vm_object = new_object;
2344         entry->offset = 0LL;
2345         vm_object_deallocate(orig_object);
2346 }
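
/*
 * Editorial note: vm_map_split() is only invoked from
 * vm_map_copy_entry() below, for anonymous (default- or swap-backed,
 * handle-less) objects that are still flagged OBJ_ONEMAPPING yet carry
 * more than one reference, so the split never races with another
 * mapping of the same pages.
 */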
2347
2348 /*
2349  *      vm_map_copy_entry:
2350  *
2351  *      Copies the contents of the source entry to the destination
2352  *      entry.  The entries *must* be aligned properly.
2353  */
2354 static void
vm_map_copy_entry(vm_map_t src_map, vm_map_t dst_map,
		  vm_map_entry_t src_entry, vm_map_entry_t dst_entry)
{
2359         vm_object_t src_object;
2360
2361         if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP)
2362                 return;
2363
2364         if (src_entry->wired_count == 0) {
2365
2366                 /*
2367                  * If the source entry is marked needs_copy, it is already
2368                  * write-protected.
2369                  */
2370                 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
2371                         pmap_protect(src_map->pmap,
2372                             src_entry->start,
2373                             src_entry->end,
2374                             src_entry->protection & ~VM_PROT_WRITE);
2375                 }
2376
2377                 /*
2378                  * Make a copy of the object.
2379                  */
2380                 if ((src_object = src_entry->object.vm_object) != NULL) {
2381
2382                         if ((src_object->handle == NULL) &&
2383                                 (src_object->type == OBJT_DEFAULT ||
2384                                  src_object->type == OBJT_SWAP)) {
2385                                 vm_object_collapse(src_object);
2386                                 if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) {
2387                                         vm_map_split(src_entry);
2388                                         src_object = src_entry->object.vm_object;
2389                                 }
2390                         }
2391
2392                         vm_object_reference(src_object);
2393                         vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
2394                         dst_entry->object.vm_object = src_object;
2395                         src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
2396                         dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
2397                         dst_entry->offset = src_entry->offset;
2398                 } else {
2399                         dst_entry->object.vm_object = NULL;
2400                         dst_entry->offset = 0;
2401                 }
2402
2403                 pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
2404                     dst_entry->end - dst_entry->start, src_entry->start);
2405         } else {
2406                 /*
2407                  * Of course, wired down pages can't be set copy-on-write.
2408                  * Cause wired pages to be copied into the new map by
2409                  * simulating faults (the new pages are pageable)
2410                  */
2411                 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
2412         }
2413 }
2414
2415 /*
2416  * vmspace_fork:
2417  * Create a new process vmspace structure and vm_map
2418  * based on those of an existing process.  The new map
2419  * is based on the old map, according to the inheritance
2420  * values on the regions in that map.
2421  *
2422  * The source map must not be locked.
2423  */
2424 struct vmspace *
vmspace_fork(struct vmspace *vm1)
{
2428         struct vmspace *vm2;
2429         vm_map_t old_map = &vm1->vm_map;
2430         vm_map_t new_map;
2431         vm_map_entry_t old_entry;
2432         vm_map_entry_t new_entry;
2433         vm_object_t object;
2434
2435         vm_map_lock(old_map);
2436         old_map->infork = 1;
2437
2438         vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset);
2439         bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
2440             (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy);
2441         new_map = &vm2->vm_map; /* XXX */
2442         new_map->timestamp = 1;
2443
2444         old_entry = old_map->header.next;
2445
2446         while (old_entry != &old_map->header) {
2447                 if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
2448                         panic("vm_map_fork: encountered a submap");
2449
2450                 switch (old_entry->inheritance) {
2451                 case VM_INHERIT_NONE:
2452                         break;
2453
2454                 case VM_INHERIT_SHARE:
2455                         /*
2456                          * Clone the entry, creating the shared object if necessary.
2457                          */
2458                         object = old_entry->object.vm_object;
2459                         if (object == NULL) {
2460                                 object = vm_object_allocate(OBJT_DEFAULT,
2461                                         atop(old_entry->end - old_entry->start));
2462                                 old_entry->object.vm_object = object;
2463                                 old_entry->offset = (vm_offset_t) 0;
2464                         }
2465
2466                         /*
2467                          * Add the reference before calling vm_object_shadow
			 * to ensure that a shadow object is created.
2469                          */
2470                         vm_object_reference(object);
2471                         if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
2472                                 vm_object_shadow(&old_entry->object.vm_object,
2473                                         &old_entry->offset,
2474                                         atop(old_entry->end - old_entry->start));
2475                                 old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
2476                                 /* Transfer the second reference too. */
2477                                 vm_object_reference(
2478                                     old_entry->object.vm_object);
2479                                 vm_object_deallocate(object);
2480                                 object = old_entry->object.vm_object;
2481                         }
2482                         vm_object_clear_flag(object, OBJ_ONEMAPPING);
2483
2484                         /*
2485                          * Clone the entry, referencing the shared object.
2486                          */
2487                         new_entry = vm_map_entry_create(new_map);
2488                         *new_entry = *old_entry;
2489                         new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2490                         new_entry->wired_count = 0;
2491
2492                         /*
2493                          * Insert the entry into the new map -- we know we're
2494                          * inserting at the end of the new map.
2495                          */
2496
2497                         vm_map_entry_link(new_map, new_map->header.prev,
2498                             new_entry);
2499
2500                         /*
2501                          * Update the physical map
2502                          */
2503
2504                         pmap_copy(new_map->pmap, old_map->pmap,
2505                             new_entry->start,
2506                             (old_entry->end - old_entry->start),
2507                             old_entry->start);
2508                         break;
2509
2510                 case VM_INHERIT_COPY:
2511                         /*
2512                          * Clone the entry and link into the map.
2513                          */
2514                         new_entry = vm_map_entry_create(new_map);
2515                         *new_entry = *old_entry;
2516                         new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2517                         new_entry->wired_count = 0;
2518                         new_entry->object.vm_object = NULL;
2519                         vm_map_entry_link(new_map, new_map->header.prev,
2520                             new_entry);
2521                         vm_map_copy_entry(old_map, new_map, old_entry,
2522                             new_entry);
2523                         break;
2524                 }
2525                 old_entry = old_entry->next;
2526         }
2527
2528         new_map->size = old_map->size;
2529         old_map->infork = 0;
2530         vm_map_unlock(old_map);
2531
2532         return (vm2);
2533 }
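
/*
 * Editorial sketch (illustrative only): the fork path builds the
 * child's address space with
 *
 *	p2->p_vmspace = vmspace_fork(p1->p_vmspace);
 *
 * VM_INHERIT_SHARE entries end up mapped in both parent and child,
 * VM_INHERIT_COPY entries are marked copy-on-write via
 * vm_map_copy_entry(), and VM_INHERIT_NONE entries are skipped.
 */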
2534
2535 int
2536 vm_map_stack (vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
2537               vm_prot_t prot, vm_prot_t max, int cow)
2538 {
2539         vm_map_entry_t prev_entry;
2540         vm_map_entry_t new_stack_entry;
2541         vm_size_t      init_ssize;
2542         int            rv;
2543
2544         if (VM_MIN_ADDRESS > 0 && addrbos < VM_MIN_ADDRESS)
2545                 return (KERN_NO_SPACE);
2546
2547         if (max_ssize < sgrowsiz)
2548                 init_ssize = max_ssize;
2549         else
2550                 init_ssize = sgrowsiz;
2551
2552         vm_map_lock(map);
2553
2554         /* If addr is already mapped, no go */
2555         if (vm_map_lookup_entry(map, addrbos, &prev_entry)) {
2556                 vm_map_unlock(map);
2557                 return (KERN_NO_SPACE);
2558         }
2559
2560         /* If we would blow our VMEM resource limit, no go */
2561         if (map->size + init_ssize >
2562             curproc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
2563                 vm_map_unlock(map);
2564                 return (KERN_NO_SPACE);
2565         }
2566
	/* If we can't accommodate max_ssize in the current mapping,
2568          * no go.  However, we need to be aware that subsequent user
2569          * mappings might map into the space we have reserved for
2570          * stack, and currently this space is not protected.  
2571          * 
2572          * Hopefully we will at least detect this condition 
2573          * when we try to grow the stack.
2574          */
2575         if ((prev_entry->next != &map->header) &&
2576             (prev_entry->next->start < addrbos + max_ssize)) {
2577                 vm_map_unlock(map);
2578                 return (KERN_NO_SPACE);
2579         }
2580
2581         /* We initially map a stack of only init_ssize.  We will
2582          * grow as needed later.  Since this is to be a grow 
2583          * down stack, we map at the top of the range.
2584          *
2585          * Note: we would normally expect prot and max to be
2586          * VM_PROT_ALL, and cow to be 0.  Possibly we should
2587          * eliminate these as input parameters, and just
2588          * pass these values here in the insert call.
2589          */
2590         rv = vm_map_insert(map, NULL, 0, addrbos + max_ssize - init_ssize,
2591                            addrbos + max_ssize, prot, max, cow);
2592
2593         /* Now set the avail_ssize amount */
	if (rv == KERN_SUCCESS) {
2595                 if (prev_entry != &map->header)
2596                         vm_map_clip_end(map, prev_entry, addrbos + max_ssize - init_ssize);
2597                 new_stack_entry = prev_entry->next;
2598                 if (new_stack_entry->end   != addrbos + max_ssize ||
2599                     new_stack_entry->start != addrbos + max_ssize - init_ssize)
2600                         panic ("Bad entry start/end for new stack entry");
2601                 else 
2602                         new_stack_entry->avail_ssize = max_ssize - init_ssize;
2603         }
2604
2605         vm_map_unlock(map);
2606         return (rv);
2607 }
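
/*
 * Editorial sketch (illustrative only): reserving a 1MB grow-down
 * stack whose bottom-of-range address is addrbos.  Only init_ssize is
 * mapped up front; the rest is grown on demand by vm_map_growstack():
 *
 *	rv = vm_map_stack(&vm->vm_map, addrbos, 1024 * 1024,
 *	    VM_PROT_ALL, VM_PROT_ALL, 0);
 */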
2608
2609 /* Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if the
2610  * desired address is already mapped, or if we successfully grow
2611  * the stack.  Also returns KERN_SUCCESS if addr is outside the
2612  * stack range (this is strange, but preserves compatibility with
2613  * the grow function in vm_machdep.c).
2614  */
2615 int
2616 vm_map_growstack (struct proc *p, vm_offset_t addr)
2617 {
2618         vm_map_entry_t prev_entry;
2619         vm_map_entry_t stack_entry;
2620         vm_map_entry_t new_stack_entry;
2621         struct vmspace *vm = p->p_vmspace;
2622         vm_map_t map = &vm->vm_map;
2623         vm_offset_t    end;
2624         int      grow_amount;
2625         int      rv = KERN_SUCCESS;
2626         int      is_procstack;
2627         int      use_read_lock = 1;
2628
2629 Retry:
2630         if (use_read_lock)
2631                 vm_map_lock_read(map);
2632         else
2633                 vm_map_lock(map);
2634
2635         /* If addr is already in the entry range, no need to grow.*/
2636         if (vm_map_lookup_entry(map, addr, &prev_entry))
2637                 goto done;
2638
2639         if ((stack_entry = prev_entry->next) == &map->header)
2640                 goto done;
2641         if (prev_entry == &map->header) 
2642                 end = stack_entry->start - stack_entry->avail_ssize;
2643         else
2644                 end = prev_entry->end;
2645
2646         /* This next test mimics the old grow function in vm_machdep.c.
2647          * It really doesn't quite make sense, but we do it anyway
2648          * for compatibility.
2649          *
	 * If the stack is not growable, return success.  This signals
	 * the caller to proceed as it normally would with ordinary vm.
2652          */
2653         if (stack_entry->avail_ssize < 1 ||
2654             addr >= stack_entry->start ||
2655             addr <  stack_entry->start - stack_entry->avail_ssize) {
2656                 goto done;
2657         } 
2658         
2659         /* Find the minimum grow amount */
2660         grow_amount = roundup (stack_entry->start - addr, PAGE_SIZE);
2661         if (grow_amount > stack_entry->avail_ssize) {
2662                 rv = KERN_NO_SPACE;
2663                 goto done;
2664         }
2665
	/* If there is no longer enough space between the entries,
	 * no go; adjust the available space.  Note: this
2668          * should only happen if the user has mapped into the
2669          * stack area after the stack was created, and is
2670          * probably an error.
2671          *
2672          * This also effectively destroys any guard page the user
2673          * might have intended by limiting the stack size.
2674          */
2675         if (grow_amount > stack_entry->start - end) {
2676                 if (use_read_lock && vm_map_lock_upgrade(map)) {
2677                         use_read_lock = 0;
2678                         goto Retry;
2679                 }
2680                 use_read_lock = 0;
2681                 stack_entry->avail_ssize = stack_entry->start - end;
2682                 rv = KERN_NO_SPACE;
2683                 goto done;
2684         }
2685
2686         is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr;
2687
2688         /* If this is the main process stack, see if we're over the 
2689          * stack limit.
2690          */
2691         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
2692                              p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
2693                 rv = KERN_NO_SPACE;
2694                 goto done;
2695         }
2696
	/* Round up the grow amount to a multiple of sgrowsiz */
2698         grow_amount = roundup (grow_amount, sgrowsiz);
2699         if (grow_amount > stack_entry->avail_ssize) {
2700                 grow_amount = stack_entry->avail_ssize;
2701         }
2702         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
2703                              p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
2704                 grow_amount = p->p_rlimit[RLIMIT_STACK].rlim_cur -
2705                               ctob(vm->vm_ssize);
2706         }
2707
2708         /* If we would blow our VMEM resource limit, no go */
2709         if (map->size + grow_amount >
2710             curproc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
2711                 rv = KERN_NO_SPACE;
2712                 goto done;
2713         }
2714
2715         if (use_read_lock && vm_map_lock_upgrade(map)) {
2716                 use_read_lock = 0;
2717                 goto Retry;
2718         }
2719         use_read_lock = 0;
2720
2721         /* Get the preliminary new entry start value */
2722         addr = stack_entry->start - grow_amount;
2723
2724         /* If this puts us into the previous entry, cut back our growth
2725          * to the available space.  Also, see the note above.
2726          */
2727         if (addr < end) {
2728                 stack_entry->avail_ssize = stack_entry->start - end;
2729                 addr = end;
2730         }
2731
2732         rv = vm_map_insert(map, NULL, 0, addr, stack_entry->start,
2733                            VM_PROT_ALL,
2734                            VM_PROT_ALL,
2735                            0);
2736
2737         /* Adjust the available stack space by the amount we grew. */
2738         if (rv == KERN_SUCCESS) {
2739                 if (prev_entry != &map->header)
2740                         vm_map_clip_end(map, prev_entry, addr);
2741                 new_stack_entry = prev_entry->next;
2742                 if (new_stack_entry->end   != stack_entry->start  ||
2743                     new_stack_entry->start != addr)
2744                         panic ("Bad stack grow start/end in new stack entry");
2745                 else {
2746                         new_stack_entry->avail_ssize = stack_entry->avail_ssize -
2747                                                         (new_stack_entry->end -
2748                                                          new_stack_entry->start);
2749                         if (is_procstack)
2750                                 vm->vm_ssize += btoc(new_stack_entry->end -
2751                                                      new_stack_entry->start);
2752                 }
2753         }
2754
2755 done:
2756         if (use_read_lock)
2757                 vm_map_unlock_read(map);
2758         else
2759                 vm_map_unlock(map);
2760         return (rv);
2761 }
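
/*
 * Editorial sketch (illustrative only): a page-fault handler would
 * give the stack a chance to grow before failing the fault:
 *
 *	if (vm_map_growstack(p, va) != KERN_SUCCESS)
 *		return (KERN_FAILURE);
 *	... retry the lookup/fault ...
 */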
2762
2763 /*
2764  * Unshare the specified VM space for exec.  If other processes are
 * mapped to it, then create a new one.  The new vmspace starts out empty.
2766  */
2767
2768 void
vmspace_exec(struct proc *p)
{
2770         struct vmspace *oldvmspace = p->p_vmspace;
2771         struct vmspace *newvmspace;
2772         vm_map_t map = &p->p_vmspace->vm_map;
2773
2774         newvmspace = vmspace_alloc(map->min_offset, map->max_offset);
2775         bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy,
2776             (caddr_t) (newvmspace + 1) - (caddr_t) &newvmspace->vm_startcopy);
2777         /*
2778          * This code is written like this for prototype purposes.  The
	 * goal is to avoid running down the vmspace here, but to let
	 * the other processes that are still using the vmspace finally
	 * run it down.  Even though there is little or no chance of blocking
2782          * here, it is a good idea to keep this form for future mods.
2783          */
2784         vmspace_free(oldvmspace);
2785         p->p_vmspace = newvmspace;
2786         pmap_pinit2(vmspace_pmap(newvmspace));
2787         if (p == curproc)
2788                 pmap_activate(p);
2789 }
2790
2791 /*
2792  * Unshare the specified VM space for forcing COW.  This
2793  * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
2794  */
2795
2796 void
vmspace_unshare(struct proc *p)
{
2798         struct vmspace *oldvmspace = p->p_vmspace;
2799         struct vmspace *newvmspace;
2800
2801         if (oldvmspace->vm_refcnt == 1)
2802                 return;
2803         newvmspace = vmspace_fork(oldvmspace);
2804         vmspace_free(oldvmspace);
2805         p->p_vmspace = newvmspace;
2806         pmap_pinit2(vmspace_pmap(newvmspace));
2807         if (p == curproc)
2808                 pmap_activate(p);
2809 }
2811
2812 /*
2813  *      vm_map_lookup:
2814  *
2815  *      Finds the VM object, offset, and
2816  *      protection for a given virtual address in the
2817  *      specified map, assuming a page fault of the
2818  *      type specified.
2819  *
2820  *      Leaves the map in question locked for read; return
2821  *      values are guaranteed until a vm_map_lookup_done
2822  *      call is performed.  Note that the map argument
2823  *      is in/out; the returned map must be used in
2824  *      the call to vm_map_lookup_done.
2825  *
2826  *      A handle (out_entry) is returned for use in
2827  *      vm_map_lookup_done, to make that fast.
2828  *
2829  *      If a lookup is requested with "write protection"
2830  *      specified, the map may be changed to perform virtual
2831  *      copying operations, although the data referenced will
2832  *      remain the same.
2833  */
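
/*
 * Typical caller sequence (a sketch only; see the fault handling code
 * for actual usage):
 *
 *      rv = vm_map_lookup(&map, vaddr, fault_type, &entry,
 *          &object, &pindex, &prot, &wired);
 *      if (rv == KERN_SUCCESS) {
 *              ... use 'object' + 'pindex' to resolve the page ...
 *              vm_map_lookup_done(map, entry);
 *      }
 */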
2834 int
2835 vm_map_lookup(vm_map_t *var_map,                /* IN/OUT */
2836               vm_offset_t vaddr,
2837               vm_prot_t fault_typea,
2838               vm_map_entry_t *out_entry,        /* OUT */
2839               vm_object_t *object,              /* OUT */
2840               vm_pindex_t *pindex,              /* OUT */
2841               vm_prot_t *out_prot,              /* OUT */
2842               boolean_t *wired)                 /* OUT */
2843 {
2844         vm_map_entry_t entry;
2845         vm_map_t map = *var_map;
2846         vm_prot_t prot;
2847         vm_prot_t fault_type = fault_typea;
2848         int use_read_lock = 1;
2849         int rv = KERN_SUCCESS;
2850
2851 RetryLookup:
2852         if (use_read_lock)
2853                 vm_map_lock_read(map);
2854         else
2855                 vm_map_lock(map);
2856
        /*
         * If the map has an interesting hint, try it before calling the
         * full-blown lookup routine.
         */
2861         entry = map->hint;
2862         *out_entry = entry;
2863
2864         if ((entry == &map->header) ||
2865             (vaddr < entry->start) || (vaddr >= entry->end)) {
2866                 vm_map_entry_t tmp_entry;
2867
2868                 /*
2869                  * Entry was either not a valid hint, or the vaddr was not
2870                  * contained in the entry, so do a full lookup.
2871                  */
2872                 if (!vm_map_lookup_entry(map, vaddr, &tmp_entry)) {
2873                         rv = KERN_INVALID_ADDRESS;
2874                         goto done;
2875                 }
2876
2877                 entry = tmp_entry;
2878                 *out_entry = entry;
2879         }

        /*
         * Handle submaps: descend into the submap and restart the
         * lookup there.
         */
2884
2885         if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
2886                 vm_map_t old_map = map;
2887
2888                 *var_map = map = entry->object.sub_map;
2889                 if (use_read_lock)
2890                         vm_map_unlock_read(old_map);
2891                 else
2892                         vm_map_unlock(old_map);
2893                 use_read_lock = 1;
2894                 goto RetryLookup;
2895         }
2896
2897         /*
2898          * Check whether this task is allowed to have this page.
2899          * Note the special case for MAP_ENTRY_COW
2900          * pages with an override.  This is to implement a forced
2901          * COW for debuggers.
2902          */
2903
2904         if (fault_type & VM_PROT_OVERRIDE_WRITE)
2905                 prot = entry->max_protection;
2906         else
2907                 prot = entry->protection;
2908
2909         fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
2910         if ((fault_type & prot) != fault_type) {
2911                 rv = KERN_PROTECTION_FAILURE;
2912                 goto done;
2913         }
2914
2915         if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
2916             (entry->eflags & MAP_ENTRY_COW) &&
2917             (fault_type & VM_PROT_WRITE) &&
2918             (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) {
2919                 rv = KERN_PROTECTION_FAILURE;
2920                 goto done;
2921         }
2922
2923         /*
2924          * If this page is not pageable, we have to get it for all possible
2925          * accesses.
2926          */
2927
2928         *wired = (entry->wired_count != 0);
2929         if (*wired)
2930                 prot = fault_type = entry->protection;
2931
        /*
         * If the entry was copy-on-write, we either resolve the write
         * fault now by shadowing the object, or demote the permissions
         * to read-only.
         */
2935
2936         if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
2937                 /*
2938                  * If we want to write the page, we may as well handle that
2939                  * now since we've got the map locked.
2940                  *
2941                  * If we don't need to write the page, we just demote the
2942                  * permissions allowed.
2943                  */
2944
2945                 if (fault_type & VM_PROT_WRITE) {
2946                         /*
2947                          * Make a new object, and place it in the object
2948                          * chain.  Note that no new references have appeared
2949                          * -- one just moved from the map to the new
2950                          * object.
2951                          */
2952
2953                         if (use_read_lock && vm_map_lock_upgrade(map)) {
2954                                 use_read_lock = 0;
2955                                 goto RetryLookup;
2956                         }
2957                         use_read_lock = 0;
2958
2959                         vm_object_shadow(
2960                             &entry->object.vm_object,
2961                             &entry->offset,
2962                             atop(entry->end - entry->start));
2963
2964                         entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
2965                 } else {
2966                         /*
2967                          * We're attempting to read a copy-on-write page --
2968                          * don't allow writes.
2969                          */
2970
2971                         prot &= ~VM_PROT_WRITE;
2972                 }
2973         }
2974
2975         /*
2976          * Create an object if necessary.
2977          */
2978         if (entry->object.vm_object == NULL &&
2979             !map->system_map) {
2980                 if (use_read_lock && vm_map_lock_upgrade(map))  {
2981                         use_read_lock = 0;
2982                         goto RetryLookup;
2983                 }
2984                 use_read_lock = 0;
2985                 entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
2986                     atop(entry->end - entry->start));
2987                 entry->offset = 0;
2988         }
2989
2990         /*
2991          * Return the object/offset from this entry.  If the entry was
2992          * copy-on-write or empty, it has been fixed up.
2993          */
2994
2995         *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
2996         *object = entry->object.vm_object;
2997
        /*
         * Return the computed protection.  On success we return with a
         * read lock held on the map; on failure the map is unlocked.
         */
3003         *out_prot = prot;
3004 done:
3005         if (rv == KERN_SUCCESS) {
3006                 if (use_read_lock == 0)
3007                         vm_map_lock_downgrade(map);
3008         } else if (use_read_lock) {
3009                 vm_map_unlock_read(map);
3010         } else {
3011                 vm_map_unlock(map);
3012         }
3013         return (rv);
3014 }
3015
3016 /*
3017  *      vm_map_lookup_done:
3018  *
3019  *      Releases locks acquired by a vm_map_lookup
3020  *      (according to the handle returned by that lookup).
3021  */
3022
3023 void
vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry)
{
3028         /*
3029          * Unlock the main-level map
3030          */
3031
3032         vm_map_unlock_read(map);
3033 }
3034
/*
 * Implement uiomove with VM operations.  This code (and its collateral
 * changes) supports every combination of source object modification and
 * COW-type operation.
 */
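/*
 * 'mapa' and 'uaddra' name the destination window in user space,
 * 'srcobject' and 'cp' the source object and byte offset, and 'cnta'
 * the byte count.  When 'npages' is non-NULL, each chunk is transferred
 * only if its source pages are resident and fully valid, and '*npages'
 * accumulates the number of pages handled.
 */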
3040 int
vm_uiomove(vm_map_t mapa, vm_object_t srcobject, off_t cp, int cnta,
           vm_offset_t uaddra, int *npages)
{
3049         vm_map_t map;
3050         vm_object_t first_object, oldobject, object;
3051         vm_map_entry_t entry;
3052         vm_prot_t prot;
3053         boolean_t wired;
3054         int tcnt, rv;
3055         vm_offset_t uaddr, start, end, tend;
3056         vm_pindex_t first_pindex, osize, oindex;
3057         off_t ooffset;
3058         int cnt;
3059
3060         if (npages)
3061                 *npages = 0;
3062
3063         cnt = cnta;
3064         uaddr = uaddra;
3065
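        /*
         * Process the request one map entry at a time, clipping each
         * entry to the portion of the user window that it covers.
         */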
3066         while (cnt > 0) {
3067                 map = mapa;
3068
3069                 if ((vm_map_lookup(&map, uaddr,
3070                         VM_PROT_READ, &entry, &first_object,
3071                         &first_pindex, &prot, &wired)) != KERN_SUCCESS) {
3072                         return EFAULT;
3073                 }
3074
3075                 vm_map_clip_start(map, entry, uaddr);
3076
3077                 tcnt = cnt;
3078                 tend = uaddr + tcnt;
3079                 if (tend > entry->end) {
3080                         tcnt = entry->end - uaddr;
3081                         tend = entry->end;
3082                 }
3083
3084                 vm_map_clip_end(map, entry, tend);
3085
3086                 start = entry->start;
3087                 end = entry->end;
3088
3089                 osize = atop(tcnt);
3090
3091                 oindex = OFF_TO_IDX(cp);
3092                 if (npages) {
3093                         vm_pindex_t idx;
3094                         for (idx = 0; idx < osize; idx++) {
3095                                 vm_page_t m;
3096                                 if ((m = vm_page_lookup(srcobject, oindex + idx)) == NULL) {
3097                                         vm_map_lookup_done(map, entry);
3098                                         return 0;
3099                                 }
3100                                 /*
3101                                  * disallow busy or invalid pages, but allow
3102                                  * m->busy pages if they are entirely valid.
3103                                  */
3104                                 if ((m->flags & PG_BUSY) ||
3105                                         ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL)) {
3106                                         vm_map_lookup_done(map, entry);
3107                                         return 0;
3108                                 }
3109                         }
3110                 }
3111
                /*
                 * If we are changing an existing map entry, just redirect
                 * the object, and change mappings.
                 */
3116                 if ((first_object->type == OBJT_VNODE) &&
3117                         ((oldobject = entry->object.vm_object) == first_object)) {
3118
3119                         if ((entry->offset != cp) || (oldobject != srcobject)) {
                                /*
                                 * Remove old window into the file
                                 */
3123                                 pmap_remove (map->pmap, uaddr, tend);
3124
                                /*
                                 * Force copy on write for mmaped regions
                                 */
3128                                 vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);
3129
                                /*
                                 * Point the object appropriately
                                 */
                                if (oldobject != srcobject) {
                                        /*
                                         * Set the object optimization
                                         * hint flag.
                                         */
3138                                         vm_object_set_flag(srcobject, OBJ_OPT);
3139                                         vm_object_reference(srcobject);
3140                                         entry->object.vm_object = srcobject;
3141
3142                                         if (oldobject) {
3143                                                 vm_object_deallocate(oldobject);
3144                                         }
3145                                 }
3146
3147                                 entry->offset = cp;
3148                                 map->timestamp++;
3149                         } else {
3150                                 pmap_remove (map->pmap, uaddr, tend);
3151                         }
3152
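                /*
                 * If the top-level object is an unshared anonymous
                 * (default or swap) object that exactly covers the
                 * window, splice the source object in underneath it
                 * as its backing object.
                 */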
3153                 } else if ((first_object->ref_count == 1) &&
3154                         (first_object->size == osize) &&
3155                         ((first_object->type == OBJT_DEFAULT) ||
3156                                 (first_object->type == OBJT_SWAP)) ) {
3157
3158                         oldobject = first_object->backing_object;
3159
3160                         if ((first_object->backing_object_offset != cp) ||
3161                                 (oldobject != srcobject)) {
                                /*
                                 * Remove old window into the file
                                 */
3165                                 pmap_remove (map->pmap, uaddr, tend);
3166
3167                                 /*
3168                                  * Remove unneeded old pages
3169                                  */
3170                                 vm_object_page_remove(first_object, 0, 0, 0);
3171
3172                                 /*
3173                                  * Invalidate swap space
3174                                  */
3175                                 if (first_object->type == OBJT_SWAP) {
3176                                         swap_pager_freespace(first_object,
3177                                                 0,
3178                                                 first_object->size);
3179                                 }
3180
                                /*
                                 * Force copy on write for mmaped regions
                                 */
3184                                 vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);
3185
                                /*
                                 * Point the object appropriately
                                 */
                                if (oldobject != srcobject) {
                                        /*
                                         * Set the object optimization
                                         * hint flag.
                                         */
3194                                         vm_object_set_flag(srcobject, OBJ_OPT);
3195                                         vm_object_reference(srcobject);
3196
3197                                         if (oldobject) {
3198                                                 LIST_REMOVE(
3199                                                         first_object, shadow_list);
3200                                                 oldobject->shadow_count--;
3201                                                 /* XXX bump generation? */
3202                                                 vm_object_deallocate(oldobject);
3203                                         }
3204
3205                                         LIST_INSERT_HEAD(&srcobject->shadow_head,
3206                                                 first_object, shadow_list);
3207                                         srcobject->shadow_count++;
3208                                         /* XXX bump generation? */
3209
3210                                         first_object->backing_object = srcobject;
3211                                 }
3212                                 first_object->backing_object_offset = cp;
3213                                 map->timestamp++;
3214                         } else {
3215                                 pmap_remove (map->pmap, uaddr, tend);
3216                         }
                /*
                 * Otherwise, we have to do a logical mmap.
                 */
3220                 } else {
3221
3222                         vm_object_set_flag(srcobject, OBJ_OPT);
3223                         vm_object_reference(srcobject);
3224
3225                         pmap_remove (map->pmap, uaddr, tend);
3226
3227                         vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);
3228                         vm_map_lock_upgrade(map);
3229
3230                         if (entry == &map->header) {
3231                                 map->first_free = &map->header;
3232                         } else if (map->first_free->start >= start) {
3233                                 map->first_free = entry->prev;
3234                         }
3235
3236                         SAVE_HINT(map, entry->prev);
3237                         vm_map_entry_delete(map, entry);
3238
3239                         object = srcobject;
3240                         ooffset = cp;
3241
3242                         rv = vm_map_insert(map, object, ooffset, start, tend,
3243                                 VM_PROT_ALL, VM_PROT_ALL, MAP_COPY_ON_WRITE);
3244
3245                         if (rv != KERN_SUCCESS)
3246                                 panic("vm_uiomove: could not insert new entry: %d", rv);
3247                 }
3248
                /*
                 * Map the window directly, if it is already in memory.
                 */
3252                 pmap_object_init_pt(map->pmap, uaddr,
3253                         srcobject, oindex, tcnt, 0);
3254
3255                 map->timestamp++;
3256                 vm_map_unlock(map);
3257
3258                 cnt -= tcnt;
3259                 uaddr += tcnt;
3260                 cp += tcnt;
3261                 if (npages)
3262                         *npages += osize;
3263         }
3264         return 0;
3265 }
3266
3267 /*
3268  * Performs the copy_on_write operations necessary to allow the virtual copies
3269  * into user space to work.  This has to be called for write(2) system calls
3270  * from other processes, file unlinking, and file size shrinkage.
3271  */
3272 void
vm_freeze_copyopts(vm_object_t object, vm_pindex_t froma, vm_pindex_t toa)
{
3277         int rv;
3278         vm_object_t robject;
3279         vm_pindex_t idx;
3280
3281         if ((object == NULL) ||
3282                 ((object->flags & OBJ_OPT) == 0))
3283                 return;
3284
3285         if (object->shadow_count > object->ref_count)
3286                 panic("vm_freeze_copyopts: sc > rc");
3287
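        /*
         * For each object shadowing us, copy in any pages the shadow
         * has not yet taken private copies of, then detach it so that
         * later changes to our pages cannot be seen through it.
         */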
        while ((robject = LIST_FIRST(&object->shadow_head)) != NULL) {
3289                 vm_pindex_t bo_pindex;
3290                 vm_page_t m_in, m_out;
3291
3292                 bo_pindex = OFF_TO_IDX(robject->backing_object_offset);
3293
3294                 vm_object_reference(robject);
3295
3296                 vm_object_pip_wait(robject, "objfrz");
3297
3298                 if (robject->ref_count == 1) {
3299                         vm_object_deallocate(robject);
3300                         continue;
3301                 }
3302
3303                 vm_object_pip_add(robject, 1);
3304
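                /*
                 * Fill in every page of the shadow that is not yet
                 * valid, copying from the backing object and reading
                 * from the pager if necessary.
                 */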
3305                 for (idx = 0; idx < robject->size; idx++) {
3306
3307                         m_out = vm_page_grab(robject, idx,
3308                                                 VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
3309
3310                         if (m_out->valid == 0) {
3311                                 m_in = vm_page_grab(object, bo_pindex + idx,
3312                                                 VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
3313                                 if (m_in->valid == 0) {
3314                                         rv = vm_pager_get_pages(object, &m_in, 1, 0);
3315                                         if (rv != VM_PAGER_OK) {
3316                                                 printf("vm_freeze_copyopts: cannot read page from file: %lx\n", (long)m_in->pindex);
3317                                                 continue;
3318                                         }
3319                                         vm_page_deactivate(m_in);
3320                                 }
3321
3322                                 vm_page_protect(m_in, VM_PROT_NONE);
3323                                 pmap_copy_page(VM_PAGE_TO_PHYS(m_in), VM_PAGE_TO_PHYS(m_out));
3324                                 m_out->valid = m_in->valid;
3325                                 vm_page_dirty(m_out);
3326                                 vm_page_activate(m_out);
3327                                 vm_page_wakeup(m_in);
3328                         }
3329                         vm_page_wakeup(m_out);
3330                 }
3331
3332                 object->shadow_count--;
3333                 object->ref_count--;
3334                 LIST_REMOVE(robject, shadow_list);
3335                 robject->backing_object = NULL;
3336                 robject->backing_object_offset = 0;
3337
3338                 vm_object_pip_wakeup(robject);
3339                 vm_object_deallocate(robject);
3340         }
3341
3342         vm_object_clear_flag(object, OBJ_OPT);
3343 }
3344
3345 #include "opt_ddb.h"
3346 #ifdef DDB
3347 #include <sys/kernel.h>
3348
3349 #include <ddb/ddb.h>
3350
/*
 *      vm_map_print:   [ debug ]
 *
 *      Invoked from ddb via "show map <address>".
 */
3354 DB_SHOW_COMMAND(map, vm_map_print)
3355 {
3356         static int nlines;
3357         /* XXX convert args. */
3358         vm_map_t map = (vm_map_t)addr;
3359         boolean_t full = have_addr;
3360
3361         vm_map_entry_t entry;
3362
3363         db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
3364             (void *)map,
3365             (void *)map->pmap, map->nentries, map->timestamp);
3366         nlines++;
3367
3368         if (!full && db_indent)
3369                 return;
3370
3371         db_indent += 2;
3372         for (entry = map->header.next; entry != &map->header;
3373             entry = entry->next) {
3374                 db_iprintf("map entry %p: start=%p, end=%p\n",
3375                     (void *)entry, (void *)entry->start, (void *)entry->end);
3376                 nlines++;
3377                 {
3378                         static char *inheritance_name[4] =
3379                         {"share", "copy", "none", "donate_copy"};
3380
3381                         db_iprintf(" prot=%x/%x/%s",
3382                             entry->protection,
3383                             entry->max_protection,
3384                             inheritance_name[(int)(unsigned char)entry->inheritance]);
3385                         if (entry->wired_count != 0)
3386                                 db_printf(", wired");
3387                 }
3388                 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
3389                         /* XXX no %qd in kernel.  Truncate entry->offset. */
3390                         db_printf(", share=%p, offset=0x%lx\n",
3391                             (void *)entry->object.sub_map,
3392                             (long)entry->offset);
3393                         nlines++;
3394                         if ((entry->prev == &map->header) ||
3395                             (entry->prev->object.sub_map !=
3396                                 entry->object.sub_map)) {
3397                                 db_indent += 2;
3398                                 vm_map_print((db_expr_t)(intptr_t)
3399                                              entry->object.sub_map,
3400                                              full, 0, (char *)0);
3401                                 db_indent -= 2;
3402                         }
3403                 } else {
3404                         /* XXX no %qd in kernel.  Truncate entry->offset. */
3405                         db_printf(", object=%p, offset=0x%lx",
3406                             (void *)entry->object.vm_object,
3407                             (long)entry->offset);
3408                         if (entry->eflags & MAP_ENTRY_COW)
3409                                 db_printf(", copy (%s)",
3410                                     (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
3411                         db_printf("\n");
3412                         nlines++;
3413
3414                         if ((entry->prev == &map->header) ||
3415                             (entry->prev->object.vm_object !=
3416                                 entry->object.vm_object)) {
3417                                 db_indent += 2;
3418                                 vm_object_print((db_expr_t)(intptr_t)
3419                                                 entry->object.vm_object,
3420                                                 full, 0, (char *)0);
3421                                 nlines += 4;
3422                                 db_indent -= 2;
3423                         }
3424                 }
3425         }
3426         db_indent -= 2;
3427         if (db_indent == 0)
3428                 nlines = 0;
3429 }
3430
3431
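/*
 * Invoked from ddb via "show procvm [<proc address>]"; without an
 * address it dumps the VM state of curproc.
 */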
3432 DB_SHOW_COMMAND(procvm, procvm)
3433 {
3434         struct proc *p;
3435
3436         if (have_addr) {
3437                 p = (struct proc *) addr;
3438         } else {
3439                 p = curproc;
3440         }
3441
3442         db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
3443             (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
3444             (void *)vmspace_pmap(p->p_vmspace));
3445
3446         vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL);
3447 }
3448
3449 #endif /* DDB */