/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_map.c,v 1.187.2.19 2003/05/27 00:47:02 alc Exp $
 * $DragonFly: src/sys/vm/vm_map.c,v 1.17 2003/12/27 05:13:32 hsu Exp $
 */

/*
 *	Virtual memory mapping module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/shm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/swap_pager.h>
#include <vm/vm_zone.h>

#include <sys/thread2.h>

/*
 *	Virtual memory maps provide for the mapping, protection,
 *	and sharing of virtual memory objects.  In addition,
 *	this module provides for an efficient virtual copy of
 *	memory from one map to another.
 *
 *	Synchronization is required prior to most operations.
 *
 *	Maps consist of an ordered doubly-linked list of simple
 *	entries; a single hint is used to speed up lookups.
 *
 *	Since portions of maps are specified by start/end addresses,
 *	which may not align with existing map entries, all
 *	routines merely "clip" entries to these start/end values.
 *	[That is, an entry is split into two, bordering at a
 *	start or end value.]  Note that these clippings may not
 *	always be necessary (as the two resulting entries are then
 *	not changed); however, the clipping is done for convenience.
 *
 *	As mentioned above, virtual copy operations are performed
 *	by copying VM object references from one map to
 *	another, and then marking both regions as copy-on-write.
 */
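
/*
 * Illustration (not from the original source): clipping an entry at an
 * address splits it into two adjacent entries that together cover the
 * original range.  For example, clipping the entry [0x1000, 0x5000) at
 * 0x3000 produces [0x1000, 0x3000) and [0x3000, 0x5000); the object
 * offset of the second half is advanced by 0x2000 so both halves keep
 * mapping the same backing pages.  See _vm_map_clip_start() and
 * _vm_map_clip_end() below.
 */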

/*
 *	vm_map_startup:
 *
 *	Initialize the vm_map module.  Must be called before
 *	any other vm_map routines.
 *
 *	Map and entry structures are allocated from the general
 *	purpose memory pool with some exceptions:
 *
 *	- The kernel map and kmem submap are allocated statically.
 *	- Kernel map entries are allocated out of a static pool.
 *
 *	These restrictions are necessary since malloc() uses the
 *	maps and requires map entries.
 */

static struct vm_zone mapentzone_store, mapzone_store;
static vm_zone_t mapentzone, mapzone, vmspace_zone;
static struct vm_object mapentobj, mapobj;

static struct vm_map_entry map_entry_init[MAX_MAPENT];
static struct vm_map map_init[MAX_KMAP];

static vm_map_entry_t vm_map_entry_create (vm_map_t map, int *);
static void vm_map_entry_dispose (vm_map_t map, vm_map_entry_t entry, int *);
static void _vm_map_clip_end (vm_map_t, vm_map_entry_t, vm_offset_t, int *);
static void _vm_map_clip_start (vm_map_t, vm_map_entry_t, vm_offset_t, int *);
static void vm_map_entry_delete (vm_map_t, vm_map_entry_t, int *);
static void vm_map_entry_unwire (vm_map_t, vm_map_entry_t);
static void vm_map_copy_entry (vm_map_t, vm_map_t, vm_map_entry_t,
		vm_map_entry_t);
static void vm_map_split (vm_map_entry_t);
static void vm_map_unclip_range (vm_map_t map, vm_map_entry_t start_entry,
		vm_offset_t start, vm_offset_t end, int *count, int flags);

void
vm_map_startup(void)
{
	mapzone = &mapzone_store;
	zbootinit(mapzone, "MAP", sizeof (struct vm_map),
		map_init, MAX_KMAP);
	mapentzone = &mapentzone_store;
	zbootinit(mapentzone, "MAP ENTRY", sizeof (struct vm_map_entry),
		map_entry_init, MAX_MAPENT);
}

/*
 * Allocate a vmspace structure, including a vm_map and pmap,
 * and initialize those structures.  The refcnt is set to 1.
 * The remaining fields must be initialized by the caller.
 */
struct vmspace *
vmspace_alloc(vm_offset_t min, vm_offset_t max)
{
	struct vmspace *vm;

	vm = zalloc(vmspace_zone);
	vm_map_init(&vm->vm_map, min, max);
	pmap_pinit(vmspace_pmap(vm));
	vm->vm_map.pmap = vmspace_pmap(vm);		/* XXX */
	vm->vm_refcnt = 1;
	vm->vm_shm = NULL;
	vm->vm_exitingcnt = 0;
	return (vm);
}

void
vm_init2(void)
{
	zinitna(mapentzone, &mapentobj, NULL, 0, 0, ZONE_USE_RESERVE, 1);
	zinitna(mapzone, &mapobj, NULL, 0, 0, 0, 1);
	vmspace_zone = zinit("VMSPACE", sizeof (struct vmspace), 0, 0, 3);
	pmap_init2();
	vm_object_init2();
}

static __inline void
vmspace_dofree(struct vmspace *vm)
{
	int count;

	/*
	 * Make sure any SysV shm is freed; it might not have been freed
	 * in exit1().
	 */
	shmexit(vm);

	KKASSERT(vm->vm_upcalls == NULL);

	/*
	 * Lock the map, to wait out all other references to it.
	 * Delete all of the mappings and pages they hold, then call
	 * the pmap module to reclaim anything left.
	 */
	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	vm_map_lock(&vm->vm_map);
	vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
		vm->vm_map.max_offset, &count);
	vm_map_unlock(&vm->vm_map);
	vm_map_entry_release(count);

	pmap_release(vmspace_pmap(vm));
	zfree(vmspace_zone, vm);
}

void
vmspace_free(struct vmspace *vm)
{
	if (vm->vm_refcnt == 0)
		panic("vmspace_free: attempt to free already freed vmspace");

	if (--vm->vm_refcnt == 0 && vm->vm_exitingcnt == 0)
		vmspace_dofree(vm);
}

void
vmspace_exitfree(struct proc *p)
{
	struct vmspace *vm;

	vm = p->p_vmspace;
	p->p_vmspace = NULL;

	/*
	 * Cleanup by a parent process wait()ing on an exiting child.
	 * vm_refcnt may not be 0 (e.g. fork() and the child exits
	 * without exec()ing).  exitingcnt may increment above 0 and
	 * drop back down to zero several times while vm_refcnt is
	 * held non-zero.  vm_refcnt may also increment above 0 and
	 * drop back down to zero several times while vm_exitingcnt
	 * is held non-zero.
	 *
	 * The last wait on the exiting child's vmspace will clean up
	 * the remainder of the vmspace.
	 */
	if (--vm->vm_exitingcnt == 0 && vm->vm_refcnt == 0)
		vmspace_dofree(vm);
}

/*
 * vmspace_swap_count() - count the approximate swap usage in pages for a
 *			  vmspace.
 *
 *	Swap usage is determined by taking the proportional swap used by
 *	VM objects backing the VM map.  To make up for fractional losses,
 *	if the VM object has any swap use at all the associated map entries
 *	count for at least 1 swap page.
 */
int
vmspace_swap_count(struct vmspace *vmspace)
{
	vm_map_t map = &vmspace->vm_map;
	vm_map_entry_t cur;
	int count = 0;

	for (cur = map->header.next; cur != &map->header; cur = cur->next) {
		vm_object_t object;

		if ((cur->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
		    (object = cur->object.vm_object) != NULL &&
		    object->type == OBJT_SWAP
		) {
			int n = (cur->end - cur->start) / PAGE_SIZE;

			if (object->un_pager.swp.swp_bcount) {
				count += object->un_pager.swp.swp_bcount *
				    SWAP_META_PAGES * n / object->size + 1;
			}
		}
	}
	return(count);
}
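
/*
 * Worked example (illustrative numbers, not from the original source):
 * for a map entry covering n = 256 pages of a 1024-page OBJT_SWAP object
 * whose swp_bcount * SWAP_META_PAGES totals 512 swapped pages, the
 * expression above yields 512 * 256 / 1024 + 1 = 129 pages, i.e. the
 * object's swapped page count scaled by the fraction of the object this
 * entry maps, rounded up by one page so any object with swap use
 * contributes at least one page.
 */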

/*
 *	vm_map_create:
 *
 *	Creates and returns a new empty VM map with
 *	the given physical map structure, and having
 *	the given lower and upper address bounds.
 */
vm_map_t
vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
{
	vm_map_t result;

	result = zalloc(mapzone);
	vm_map_init(result, min, max);
	result->pmap = pmap;
	return (result);
}

/*
 * Initialize an existing vm_map structure
 * such as that in the vmspace structure.
 * The pmap is set elsewhere.
 */
void
vm_map_init(struct vm_map *map, vm_offset_t min, vm_offset_t max)
{
	map->header.next = map->header.prev = &map->header;
	map->nentries = 0;
	map->size = 0;
	map->system_map = 0;
	map->infork = 0;
	map->min_offset = min;
	map->max_offset = max;
	map->first_free = &map->header;
	map->hint = &map->header;
	map->timestamp = 0;
	lockinit(&map->lock, 0, "thrd_sleep", 0, LK_NOPAUSE);
}

/*
 *	vm_map_entry_reserve:
 *
 *	Reserves vm_map_entry structures outside of the critical path.
 */
int
vm_map_entry_reserve(int count)
{
	struct globaldata *gd = mycpu;
	vm_map_entry_t entry;

	crit_enter();
	gd->gd_vme_avail -= count;

	/*
	 * Make sure we have enough structures in gd_vme_base to handle
	 * the reservation request.
	 */
	while (gd->gd_vme_avail < 0) {
		entry = zalloc(mapentzone);
		entry->next = gd->gd_vme_base;
		gd->gd_vme_base = entry;
		++gd->gd_vme_avail;
	}
	crit_exit();
	return(count);
}

/*
 *	vm_map_entry_release:
 *
 *	Releases previously reserved vm_map_entry structures that were not
 *	used.  If we have too much junk in our per-cpu cache, clean some of
 *	it out.
 */
void
vm_map_entry_release(int count)
{
	struct globaldata *gd = mycpu;
	vm_map_entry_t entry;

	crit_enter();
	gd->gd_vme_avail += count;
	while (gd->gd_vme_avail > MAP_RESERVE_SLOP) {
		entry = gd->gd_vme_base;
		KKASSERT(entry != NULL);
		gd->gd_vme_base = entry->next;
		--gd->gd_vme_avail;
		crit_exit();
		zfree(mapentzone, entry);
		crit_enter();
	}
	crit_exit();
}
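
/*
 * Typical usage (a sketch of the pattern already used throughout this
 * file, e.g. in vmspace_dofree()): reserve entries up front, perform the
 * map operation under the map lock, then return the unused reservation.
 *
 *	int count;
 *
 *	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
 *	vm_map_lock(map);
 *	... clip / insert / delete entries, passing &count ...
 *	vm_map_unlock(map);
 *	vm_map_entry_release(count);
 *
 * Reserving outside the map lock avoids recursing into zalloc() while
 * the map is locked.
 */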

/*
 *	vm_map_entry_kreserve:
 *
 *	Reserve map entry structures for use in kernel_map or (if it exists)
 *	kmem_map.  These entries have *ALREADY* been reserved on a per-cpu
 *	basis.
 *
 *	XXX if multiple kernel map entries are used without any intervening
 *	use by another map the KKASSERT() may assert.
 */
int
vm_map_entry_kreserve(int count)
{
	struct globaldata *gd = mycpu;

	crit_enter();
	gd->gd_vme_kdeficit += count;
	crit_exit();
	KKASSERT(gd->gd_vme_base != NULL);
	return(count);
}

/*
 *	vm_map_entry_krelease:
 *
 *	Release previously reserved map entries for kernel_map or kmem_map
 *	use.  This routine determines how many entries were actually used and
 *	replenishes the kernel reserve supply from vme_avail.
 *
 *	If there is insufficient supply vme_avail will go negative, which is
 *	ok.  We cannot safely call zalloc in this function without getting
 *	into a recursion deadlock.  zalloc() will call vm_map_entry_reserve()
 *	to regenerate the lost entries.
 */
void
vm_map_entry_krelease(int count)
{
	struct globaldata *gd = mycpu;

	crit_enter();
	gd->gd_vme_kdeficit -= count;
	gd->gd_vme_avail -= gd->gd_vme_kdeficit;	/* can go negative */
	gd->gd_vme_kdeficit = 0;
	crit_exit();
}
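
/*
 * Sketch of the kernel-map variant of the reservation pattern (assumed
 * usage, inferred from the comments above; the kreserve/krelease pair
 * never calls zalloc() and so is safe for kernel_map/kmem_map operations
 * that zalloc() itself depends on):
 *
 *	count = vm_map_entry_kreserve(MAP_RESERVE_COUNT);
 *	vm_map_lock(kernel_map);
 *	... operate on kernel_map, passing &count ...
 *	vm_map_unlock(kernel_map);
 *	vm_map_entry_krelease(count);
 */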

/*
 *	vm_map_entry_create:	[ internal use only ]
 *
 *	Allocates a VM map entry for insertion.  No entry fields are filled
 *	in.
 *
 *	This routine may be called from an interrupt thread but not a FAST
 *	interrupt.  This routine may recurse the map lock.
 */
static vm_map_entry_t
vm_map_entry_create(vm_map_t map, int *countp)
{
	struct globaldata *gd = mycpu;
	vm_map_entry_t entry;

	KKASSERT(*countp > 0);
	--*countp;
	crit_enter();
	entry = gd->gd_vme_base;
	KASSERT(entry != NULL, ("gd_vme_base NULL! count %d", *countp));
	gd->gd_vme_base = entry->next;
	crit_exit();
	return(entry);
}

/*
 *	vm_map_entry_dispose:	[ internal use only ]
 *
 *	Dispose of a vm_map_entry that is no longer being referenced.  This
 *	function may be called from an interrupt.
 */
static void
vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry, int *countp)
{
	struct globaldata *gd = mycpu;

	++*countp;
	crit_enter();
	entry->next = gd->gd_vme_base;
	gd->gd_vme_base = entry;
	crit_exit();
}

/*
 *	vm_map_entry_{un,}link:
 *
 *	Insert/remove entries from maps.
 */
static __inline void
vm_map_entry_link(vm_map_t map,
		  vm_map_entry_t after_where,
		  vm_map_entry_t entry)
{
	map->nentries++;
	entry->prev = after_where;
	entry->next = after_where->next;
	entry->next->prev = entry;
	after_where->next = entry;
}

static __inline void
vm_map_entry_unlink(vm_map_t map,
		    vm_map_entry_t entry)
{
	vm_map_entry_t prev;
	vm_map_entry_t next;

	if (entry->eflags & MAP_ENTRY_IN_TRANSITION)
		panic("vm_map_entry_unlink: attempt to mess with locked entry! %p",
		    entry);
	prev = entry->prev;
	next = entry->next;
	next->prev = prev;
	prev->next = next;
	map->nentries--;
}

/*
 *	SAVE_HINT:
 *
 *	Saves the specified entry as the hint for
 *	future lookups.
 */
#define SAVE_HINT(map,value) \
		(map)->hint = (value);

/*
 *	vm_map_lookup_entry:	[ internal use only ]
 *
 *	Finds the map entry containing (or
 *	immediately preceding) the specified address
 *	in the given map; the entry is returned
 *	in the "entry" parameter.  The boolean
 *	result indicates whether the address is
 *	actually contained in the map.
 */
boolean_t
vm_map_lookup_entry(vm_map_t map, vm_offset_t address,
    vm_map_entry_t *entry /* OUT */)
{
	vm_map_entry_t cur;
	vm_map_entry_t last;

	/*
	 * Start looking either from the head of the list, or from the hint.
	 */

	cur = map->hint;

	if (cur == &map->header)
		cur = cur->next;

	if (address >= cur->start) {
		/*
		 * Go from hint to end of list.
		 *
		 * But first, make a quick check to see if we are already
		 * looking at the entry we want (which is usually the case).
		 * Note also that we don't need to save the hint here... it
		 * is the same hint (unless we are at the header, in which
		 * case the hint didn't buy us anything anyway).
		 */
		last = &map->header;
		if ((cur != last) && (cur->end > address)) {
			*entry = cur;
			return (TRUE);
		}
	} else {
		/*
		 * Go from start to hint, *inclusively*
		 */
		last = cur->next;
		cur = map->header.next;
	}

	/*
	 * Search linearly
	 */

	while (cur != last) {
		if (cur->end > address) {
			if (address >= cur->start) {
				/*
				 * Save this lookup for future hints, and
				 * return
				 */

				*entry = cur;
				SAVE_HINT(map, cur);
				return (TRUE);
			}
			break;
		}
		cur = cur->next;
	}
	*entry = cur->prev;
	SAVE_HINT(map, *entry);
	return (FALSE);
}
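
/*
 * Contract sketch (illustrative, restating the behavior above): on TRUE,
 * *entry contains the address; on FALSE, *entry is the entry immediately
 * preceding the address (possibly &map->header).
 *
 *	vm_map_entry_t entry;
 *
 *	if (vm_map_lookup_entry(map, addr, &entry)) {
 *		... addr lies within [entry->start, entry->end) ...
 *	} else {
 *		... the gap after entry contains addr; entry->next
 *		    (if not the header) starts above addr ...
 *	}
 */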

/*
 *	vm_map_insert:
 *
 *	Inserts the given whole VM object into the target
 *	map at the specified address range.  The object's
 *	size should match that of the address range.
 *
 *	Requires that the map be locked, and leaves it so.  Requires that
 *	sufficient vm_map_entry structures have been reserved and tracks
 *	the use via countp.
 *
 *	If object is non-NULL, ref count must be bumped by caller
 *	prior to making call to account for the new entry.
 */
int
vm_map_insert(vm_map_t map, int *countp,
	      vm_object_t object, vm_ooffset_t offset,
	      vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max,
	      int cow)
{
	vm_map_entry_t new_entry;
	vm_map_entry_t prev_entry;
	vm_map_entry_t temp_entry;
	vm_eflags_t protoeflags;

	/*
	 * Check that the start and end points are not bogus.
	 */

	if ((start < map->min_offset) || (end > map->max_offset) ||
	    (start >= end))
		return (KERN_INVALID_ADDRESS);

	/*
	 * Find the entry prior to the proposed starting address; if it's part
	 * of an existing entry, this range is bogus.
	 */

	if (vm_map_lookup_entry(map, start, &temp_entry))
		return (KERN_NO_SPACE);

	prev_entry = temp_entry;

	/*
	 * Assert that the next entry doesn't overlap the end point.
	 */

	if ((prev_entry->next != &map->header) &&
	    (prev_entry->next->start < end))
		return (KERN_NO_SPACE);

	protoeflags = 0;

	if (cow & MAP_COPY_ON_WRITE)
		protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY;

	if (cow & MAP_NOFAULT) {
		protoeflags |= MAP_ENTRY_NOFAULT;

		KASSERT(object == NULL,
			("vm_map_insert: paradoxical MAP_NOFAULT request"));
	}
	if (cow & MAP_DISABLE_SYNCER)
		protoeflags |= MAP_ENTRY_NOSYNC;
	if (cow & MAP_DISABLE_COREDUMP)
		protoeflags |= MAP_ENTRY_NOCOREDUMP;

	if (object) {
		/*
		 * When object is non-NULL, it could be shared with another
		 * process.  We have to set or clear OBJ_ONEMAPPING
		 * appropriately.
		 */
		if ((object->ref_count > 1) || (object->shadow_count != 0)) {
			vm_object_clear_flag(object, OBJ_ONEMAPPING);
		}
	}
	else if ((prev_entry != &map->header) &&
		 (prev_entry->eflags == protoeflags) &&
		 (prev_entry->end == start) &&
		 (prev_entry->wired_count == 0) &&
		 ((prev_entry->object.vm_object == NULL) ||
		  vm_object_coalesce(prev_entry->object.vm_object,
				     OFF_TO_IDX(prev_entry->offset),
				     (vm_size_t)(prev_entry->end - prev_entry->start),
				     (vm_size_t)(end - prev_entry->end)))) {
		/*
		 * We were able to extend the object.  Determine if we
		 * can extend the previous map entry to include the
		 * new range as well.
		 */
		if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
		    (prev_entry->protection == prot) &&
		    (prev_entry->max_protection == max)) {
			map->size += (end - prev_entry->end);
			prev_entry->end = end;
			vm_map_simplify_entry(map, prev_entry, countp);
			return (KERN_SUCCESS);
		}

		/*
		 * If we can extend the object but cannot extend the
		 * map entry, we have to create a new map entry.  We
		 * must bump the ref count on the extended object to
		 * account for it.  object may be NULL.
		 */
		object = prev_entry->object.vm_object;
		offset = prev_entry->offset +
			(prev_entry->end - prev_entry->start);
		vm_object_reference(object);
	}

	/*
	 * NOTE: if conditionals fail, object can be NULL here.  This occurs
	 * in things like the buffer map where we manage kva but do not manage
	 * backing objects.
	 */

	/*
	 * Create a new entry
	 */

	new_entry = vm_map_entry_create(map, countp);
	new_entry->start = start;
	new_entry->end = end;

	new_entry->eflags = protoeflags;
	new_entry->object.vm_object = object;
	new_entry->offset = offset;
	new_entry->avail_ssize = 0;

	new_entry->inheritance = VM_INHERIT_DEFAULT;
	new_entry->protection = prot;
	new_entry->max_protection = max;
	new_entry->wired_count = 0;

	/*
	 * Insert the new entry into the list
	 */

	vm_map_entry_link(map, prev_entry, new_entry);
	map->size += new_entry->end - new_entry->start;

	/*
	 * Update the free space hint
	 */
	if ((map->first_free == prev_entry) &&
	    (prev_entry->end >= new_entry->start)) {
		map->first_free = new_entry;
	}

#if 0
	/*
	 * Temporarily removed to avoid MAP_STACK panic, due to
	 * MAP_STACK being a huge hack.  Will be added back in
	 * when MAP_STACK (and the user stack mapping) is fixed.
	 */
	/*
	 * It may be possible to simplify the entry
	 */
	vm_map_simplify_entry(map, new_entry, countp);
#endif

	if (cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) {
		pmap_object_init_pt(map->pmap, start,
				    object, OFF_TO_IDX(offset), end - start,
				    cow & MAP_PREFAULT_PARTIAL);
	}

	return (KERN_SUCCESS);
}

/*
 * Find sufficient space for `length' bytes in the given map, starting at
 * `start'.  The map must be locked.  Returns 0 on success, 1 on no space.
 *
 * This function will return an arbitrarily aligned pointer.  If no
 * particular alignment is required you should pass align as 1.  Note that
 * the map may return PAGE_SIZE aligned pointers if all the lengths used in
 * the map are a multiple of PAGE_SIZE, even if you pass a smaller align
 * argument.
 *
 * 'align' should be a power of 2 but is not required to be.
 */
int
vm_map_findspace(
	vm_map_t map,
	vm_offset_t start,
	vm_size_t length,
	vm_offset_t align,
	vm_offset_t *addr)
{
	vm_map_entry_t entry, next;
	vm_offset_t end;
	vm_offset_t align_mask;

	if (start < map->min_offset)
		start = map->min_offset;
	if (start > map->max_offset)
		return (1);

	/*
	 * If the alignment is not a power of 2 we will have to use
	 * a mod/division, so set align_mask to a special value.
	 */
	if ((align | (align - 1)) + 1 != (align << 1))
		align_mask = (vm_offset_t)-1;
	else
		align_mask = align - 1;

retry:
	/*
	 * Look for the first possible address; if there's already something
	 * at this address, we have to start after it.
	 */
	if (start == map->min_offset) {
		if ((entry = map->first_free) != &map->header)
			start = entry->end;
	} else {
		vm_map_entry_t tmp;

		if (vm_map_lookup_entry(map, start, &tmp))
			start = tmp->end;
		entry = tmp;
	}

	/*
	 * Look through the rest of the map, trying to fit a new region in the
	 * gap between existing regions, or after the very last region.
	 */
	for (;; start = (entry = next)->end) {
		/*
		 * Adjust the proposed start by the requested alignment and
		 * be sure that we didn't wrap the address.
		 */
		if (align_mask == (vm_offset_t)-1)
			end = ((start + align - 1) / align) * align;
		else
			end = (start + align_mask) & ~align_mask;
		if (end < start)
			return (1);
		start = end;
		/*
		 * Find the end of the proposed new region.  Be sure we didn't
		 * go beyond the end of the map, or wrap around the address.
		 * Then check to see if this is the last entry or if the
		 * proposed end fits in the gap between this and the next
		 * entry.
		 */
		end = start + length;
		if (end > map->max_offset || end < start)
			return (1);
		next = entry->next;
		if (next == &map->header || next->start >= end)
			break;
	}
	SAVE_HINT(map, entry);
	if (map == kernel_map) {
		vm_offset_t ksize;
		if ((ksize = round_page(start + length)) > kernel_vm_end) {
			pmap_growkernel(ksize);
			goto retry;
		}
	}
	*addr = start;
	return (0);
}
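
/*
 * Note on the power-of-2 test above (worked example, not from the
 * original source): for align = 16, (16 | 15) + 1 == 32 == 16 << 1, so
 * align_mask becomes 15 and alignment is done with a cheap AND.  For
 * align = 12, (12 | 11) + 1 == 16 != 24, so align_mask is set to -1 and
 * the divide/multiply path is used instead.
 */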

/*
 *	vm_map_find finds an unallocated region in the target address
 *	map with the given length.  The search is defined to be
 *	first-fit from the specified address; the region found is
 *	returned in the same parameter.
 *
 *	If object is non-NULL, ref count must be bumped by caller
 *	prior to making call to account for the new entry.
 */
int
vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
	    vm_offset_t *addr,	/* IN/OUT */
	    vm_size_t length, boolean_t find_space, vm_prot_t prot,
	    vm_prot_t max, int cow)
{
	vm_offset_t start;
	int result;
	int count;

	start = *addr;

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	vm_map_lock(map);
	if (find_space) {
		if (vm_map_findspace(map, start, length, 1, addr)) {
			vm_map_unlock(map);
			vm_map_entry_release(count);
			return (KERN_NO_SPACE);
		}
		start = *addr;
	}
	result = vm_map_insert(map, &count, object, offset,
		start, start + length, prot, max, cow);
	vm_map_unlock(map);
	vm_map_entry_release(count);

	return (result);
}
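
/*
 * Example call (a sketch under assumed flags, not a verbatim caller from
 * this file): map 'size' bytes of anonymous memory anywhere in a map,
 * letting vm_map_find() pick the address starting from 'addr'.
 *
 *	vm_offset_t addr = vm_map_min(map);
 *	int rv;
 *
 *	rv = vm_map_find(map, NULL, 0, &addr, size, TRUE,
 *			 VM_PROT_ALL, VM_PROT_ALL, 0);
 *	if (rv != KERN_SUCCESS)
 *		return (ENOMEM);
 *	... 'addr' now holds the chosen start address ...
 */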

/*
 *	vm_map_simplify_entry:
 *
 *	Simplify the given map entry by merging with either neighbor.  This
 *	routine also has the ability to merge with both neighbors.
 *
 *	The map must be locked.
 *
 *	This routine guarantees that the passed entry remains valid (though
 *	possibly extended).  When merging, this routine may delete one or
 *	both neighbors.  No action is taken on entries which have their
 *	in-transition flag set.
 */
void
vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry, int *countp)
{
	vm_map_entry_t next, prev;
	vm_size_t prevsize, esize;

	if (entry->eflags & (MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP)) {
		++mycpu->gd_cnt.v_intrans_coll;
		return;
	}

	prev = entry->prev;
	if (prev != &map->header) {
		prevsize = prev->end - prev->start;
		if ( (prev->end == entry->start) &&
		     (prev->object.vm_object == entry->object.vm_object) &&
		     (!prev->object.vm_object ||
			(prev->offset + prevsize == entry->offset)) &&
		     (prev->eflags == entry->eflags) &&
		     (prev->protection == entry->protection) &&
		     (prev->max_protection == entry->max_protection) &&
		     (prev->inheritance == entry->inheritance) &&
		     (prev->wired_count == entry->wired_count)) {
			if (map->first_free == prev)
				map->first_free = entry;
			if (map->hint == prev)
				map->hint = entry;
			vm_map_entry_unlink(map, prev);
			entry->start = prev->start;
			entry->offset = prev->offset;
			if (prev->object.vm_object)
				vm_object_deallocate(prev->object.vm_object);
			vm_map_entry_dispose(map, prev, countp);
		}
	}

	next = entry->next;
	if (next != &map->header) {
		esize = entry->end - entry->start;
		if ((entry->end == next->start) &&
		    (next->object.vm_object == entry->object.vm_object) &&
		    (!entry->object.vm_object ||
			(entry->offset + esize == next->offset)) &&
		    (next->eflags == entry->eflags) &&
		    (next->protection == entry->protection) &&
		    (next->max_protection == entry->max_protection) &&
		    (next->inheritance == entry->inheritance) &&
		    (next->wired_count == entry->wired_count)) {
			if (map->first_free == next)
				map->first_free = entry;
			if (map->hint == next)
				map->hint = entry;
			vm_map_entry_unlink(map, next);
			entry->end = next->end;
			if (next->object.vm_object)
				vm_object_deallocate(next->object.vm_object);
			vm_map_entry_dispose(map, next, countp);
		}
	}
}
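
/*
 * Merge example (illustrative values): two adjacent entries
 * [0x1000, 0x3000) at offset 0 and [0x3000, 0x4000) at offset 0x2000,
 * backed by the same object with identical eflags, protections,
 * inheritance and wire counts, collapse into a single entry
 * [0x1000, 0x4000) at offset 0 -- the inverse of the clipping shown
 * near the top of this file.
 */
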
/*
 *	vm_map_clip_start:	[ internal use only ]
 *
 *	Asserts that the given entry begins at or after
 *	the specified address; if necessary,
 *	it splits the entry into two.
 */
#define vm_map_clip_start(map, entry, startaddr, countp) \
{ \
	if (startaddr > entry->start) \
		_vm_map_clip_start(map, entry, startaddr, countp); \
}

/*
 *	This routine is called only when it is known that
 *	the entry must be split.
 */
static void
_vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start,
    int *countp)
{
	vm_map_entry_t new_entry;

	/*
	 * Split off the front portion -- note that we must insert the new
	 * entry BEFORE this one, so that this entry has the specified
	 * starting address.
	 */

	vm_map_simplify_entry(map, entry, countp);

	/*
	 * If there is no object backing this entry, we might as well create
	 * one now.  If we defer it, an object can get created after the map
	 * is clipped, and individual objects will be created for the split-up
	 * map.  This is a bit of a hack, but is also about the best place to
	 * put this improvement.
	 */

	if (entry->object.vm_object == NULL && !map->system_map) {
		vm_object_t object;
		object = vm_object_allocate(OBJT_DEFAULT,
				atop(entry->end - entry->start));
		entry->object.vm_object = object;
		entry->offset = 0;
	}

	new_entry = vm_map_entry_create(map, countp);
	*new_entry = *entry;

	new_entry->end = start;
	entry->offset += (start - entry->start);
	entry->start = start;

	vm_map_entry_link(map, entry->prev, new_entry);

	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
		vm_object_reference(new_entry->object.vm_object);
	}
}

/*
 *	vm_map_clip_end:	[ internal use only ]
 *
 *	Asserts that the given entry ends at or before
 *	the specified address; if necessary,
 *	it splits the entry into two.
 */

#define vm_map_clip_end(map, entry, endaddr, countp) \
{ \
	if (endaddr < entry->end) \
		_vm_map_clip_end(map, entry, endaddr, countp); \
}

/*
 *	This routine is called only when it is known that
 *	the entry must be split.
 */
static void
_vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end,
    int *countp)
{
	vm_map_entry_t new_entry;

	/*
	 * If there is no object backing this entry, we might as well create
	 * one now.  If we defer it, an object can get created after the map
	 * is clipped, and individual objects will be created for the split-up
	 * map.  This is a bit of a hack, but is also about the best place to
	 * put this improvement.
	 */

	if (entry->object.vm_object == NULL && !map->system_map) {
		vm_object_t object;
		object = vm_object_allocate(OBJT_DEFAULT,
				atop(entry->end - entry->start));
		entry->object.vm_object = object;
		entry->offset = 0;
	}

	/*
	 * Create a new entry and insert it AFTER the specified entry
	 */

	new_entry = vm_map_entry_create(map, countp);
	*new_entry = *entry;

	new_entry->start = entry->end = end;
	new_entry->offset += (end - entry->start);

	vm_map_entry_link(map, entry, new_entry);

	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
		vm_object_reference(new_entry->object.vm_object);
	}
}

/*
 *	VM_MAP_RANGE_CHECK:	[ internal use only ]
 *
 *	Asserts that the starting and ending region
 *	addresses fall within the valid range of the map.
 */
#define VM_MAP_RANGE_CHECK(map, start, end)		\
		{					\
		if (start < vm_map_min(map))		\
			start = vm_map_min(map);	\
		if (end > vm_map_max(map))		\
			end = vm_map_max(map);		\
		if (start > end)			\
			start = end;			\
		}

/*
 *	vm_map_transition_wait:	[ kernel use only ]
 *
 *	Used to block when an in-transition collision occurs.  The map
 *	is unlocked for the sleep and relocked before the return.
 */
static
void
vm_map_transition_wait(vm_map_t map)
{
	vm_map_unlock(map);
	tsleep(map, 0, "vment", 0);
	vm_map_lock(map);
}

/*
 * CLIP_CHECK_BACK
 * CLIP_CHECK_FWD
 *
 *	When we do blocking operations with the map lock held it is
 *	possible that a clip might have occurred on our in-transit entry,
 *	requiring an adjustment to the entry in our loop.  These macros
 *	help the pageable and clip_range code deal with the case.  The
 *	conditional costs virtually nothing if no clipping has occurred.
 */

#define CLIP_CHECK_BACK(entry, save_start)		\
    do {						\
	    while (entry->start != save_start) {	\
		    entry = entry->prev;		\
		    KASSERT(entry != &map->header, ("bad entry clip")); \
	    }						\
    } while(0)

#define CLIP_CHECK_FWD(entry, save_end)			\
    do {						\
	    while (entry->end != save_end) {		\
		    entry = entry->next;		\
		    KASSERT(entry != &map->header, ("bad entry clip")); \
	    }						\
    } while(0)

/*
 *	vm_map_clip_range:	[ kernel use only ]
 *
 *	Clip the specified range and return the base entry.  The
 *	range may cover several entries starting at the returned base
 *	and the first and last entry in the covering sequence will be
 *	properly clipped to the requested start and end address.
 *
 *	If no holes are allowed you should pass the MAP_CLIP_NO_HOLES
 *	flag.
 *
 *	The MAP_ENTRY_IN_TRANSITION flag will be set for the entries
 *	covered by the requested range.
 *
 *	The map must be exclusively locked on entry and will remain locked
 *	on return.  If no range exists, or the range contains holes and you
 *	specified that no holes were allowed, NULL will be returned.  This
 *	routine may temporarily unlock the map in order to avoid a deadlock
 *	when sleeping.
 */
static
vm_map_entry_t
vm_map_clip_range(vm_map_t map, vm_offset_t start, vm_offset_t end,
	int *countp, int flags)
{
	vm_map_entry_t start_entry;
	vm_map_entry_t entry;

	/*
	 * Locate the entry and effect initial clipping.  The in-transition
	 * case does not occur very often so do not try to optimize it.
	 */
again:
	if (vm_map_lookup_entry(map, start, &start_entry) == FALSE)
		return (NULL);
	entry = start_entry;
	if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
		entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
		++mycpu->gd_cnt.v_intrans_coll;
		++mycpu->gd_cnt.v_intrans_wait;
		vm_map_transition_wait(map);
		/*
		 * entry and/or start_entry may have been clipped while
		 * we slept, or may have gone away entirely.  We have
		 * to restart from the lookup.
		 */
		goto again;
	}
	/*
	 * Since we hold an exclusive map lock we do not have to restart
	 * after clipping, even though clipping may block in zalloc.
	 */
	vm_map_clip_start(map, entry, start, countp);
	vm_map_clip_end(map, entry, end, countp);
	entry->eflags |= MAP_ENTRY_IN_TRANSITION;

	/*
	 * Scan entries covered by the range.  When working on the next
	 * entry a restart need only re-loop on the current entry which
	 * we have already locked, since 'next' may have changed.  Also,
	 * even though entry is safe, it may have been clipped so we
	 * have to iterate forwards through the clip after sleeping.
	 */
	while (entry->next != &map->header && entry->next->start < end) {
		vm_map_entry_t next = entry->next;

		if (flags & MAP_CLIP_NO_HOLES) {
			if (next->start > entry->end) {
				vm_map_unclip_range(map, start_entry,
					start, entry->end, countp, flags);
				return(NULL);
			}
		}

		if (next->eflags & MAP_ENTRY_IN_TRANSITION) {
			vm_offset_t save_end = entry->end;
			next->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
			++mycpu->gd_cnt.v_intrans_coll;
			++mycpu->gd_cnt.v_intrans_wait;
			vm_map_transition_wait(map);

			/*
			 * clips might have occurred while we blocked.
			 */
			CLIP_CHECK_FWD(entry, save_end);
			CLIP_CHECK_BACK(start_entry, start);
			continue;
		}
		/*
		 * No restart necessary even though clip_end may block, we
		 * are holding the map lock.
		 */
		vm_map_clip_end(map, next, end, countp);
		next->eflags |= MAP_ENTRY_IN_TRANSITION;
		entry = next;
	}
	if (flags & MAP_CLIP_NO_HOLES) {
		if (entry->end != end) {
			vm_map_unclip_range(map, start_entry,
				start, entry->end, countp, flags);
			return(NULL);
		}
	}
	return(start_entry);
}

/*
 *	vm_map_unclip_range:	[ kernel use only ]
 *
 *	Undo the effect of vm_map_clip_range().  You should pass the same
 *	flags and the same range that you passed to vm_map_clip_range().
 *	This code will clear the in-transition flag on the entries and
 *	wake up anyone waiting.  This code will also simplify the sequence
 *	and attempt to merge it with entries before and after the sequence.
 *
 *	The map must be locked on entry and will remain locked on return.
 *
 *	Note that you should also pass the start_entry returned by
 *	vm_map_clip_range().  However, if you block between the two calls
 *	with the map unlocked please be aware that the start_entry may
 *	have been clipped and you may need to scan it backwards to find
 *	the entry corresponding with the original start address.  You are
 *	responsible for this; vm_map_unclip_range() expects the correct
 *	start_entry to be passed to it and will KASSERT otherwise.
 */
static
void
vm_map_unclip_range(
	vm_map_t map,
	vm_map_entry_t start_entry,
	vm_offset_t start,
	vm_offset_t end,
	int *countp,
	int flags)
{
	vm_map_entry_t entry;

	entry = start_entry;

	KASSERT(entry->start == start, ("unclip_range: illegal base entry"));
	while (entry != &map->header && entry->start < end) {
		KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION,
			("in-transition flag not set during unclip on: %p",
			entry));
		KASSERT(entry->end <= end,
			("unclip_range: tail wasn't clipped"));
		entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
		if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
			wakeup(map);
		}
		entry = entry->next;
	}

	/*
	 * Simplification does not block so there is no restart case.
	 */
	entry = start_entry;
	while (entry != &map->header && entry->start < end) {
		vm_map_simplify_entry(map, entry, countp);
		entry = entry->next;
	}
}
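
/*
 * Usage sketch of the clip/unclip pair (a pattern implied by the
 * comments above; the operation body itself is hypothetical):
 *
 *	entry = vm_map_clip_range(map, start, end, &count,
 *				  MAP_CLIP_NO_HOLES);
 *	if (entry == NULL)
 *		return (KERN_INVALID_ADDRESS);
 *	... operate on each in-transition entry from 'entry' up to 'end',
 *	    blocking if necessary; the IN_TRANSITION flags keep other
 *	    threads from clipping or unlinking them ...
 *	vm_map_unclip_range(map, entry, start, end, &count,
 *			    MAP_CLIP_NO_HOLES);
 */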

/*
 *	vm_map_submap:		[ kernel use only ]
 *
 *	Mark the given range as handled by a subordinate map.
 *
 *	This range must have been created with vm_map_find,
 *	and no other operations may have been performed on this
 *	range prior to calling vm_map_submap.
 *
 *	Only a limited number of operations can be performed
 *	within this range after calling vm_map_submap:
 *		vm_fault
 *	[Don't try vm_map_copy!]
 *
 *	To remove a submapping, one must first remove the
 *	range from the superior map, and then destroy the
 *	submap (if desired).  [Better yet, don't try it.]
 */
int
vm_map_submap(vm_map_t map, vm_offset_t start, vm_offset_t end, vm_map_t submap)
{
	vm_map_entry_t entry;
	int result = KERN_INVALID_ARGUMENT;
	int count;

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		vm_map_clip_start(map, entry, start, &count);
	} else {
		entry = entry->next;
	}

	vm_map_clip_end(map, entry, end, &count);

	if ((entry->start == start) && (entry->end == end) &&
	    ((entry->eflags & MAP_ENTRY_COW) == 0) &&
	    (entry->object.vm_object == NULL)) {
		entry->object.sub_map = submap;
		entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
		result = KERN_SUCCESS;
	}
	vm_map_unlock(map);
	vm_map_entry_release(count);

	return (result);
}

/*
 *	vm_map_protect:
 *
 *	Sets the protection of the specified address
 *	region in the target map.  If "set_max" is
 *	specified, the maximum protection is to be set;
 *	otherwise, only the current protection is affected.
 */
int
vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
	       vm_prot_t new_prot, boolean_t set_max)
{
	vm_map_entry_t current;
	vm_map_entry_t entry;
	int count;

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		vm_map_clip_start(map, entry, start, &count);
	} else {
		entry = entry->next;
	}

	/*
	 * Make a first pass to check for protection violations.
	 */

	current = entry;
	while ((current != &map->header) && (current->start < end)) {
		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
			vm_map_unlock(map);
			vm_map_entry_release(count);
			return (KERN_INVALID_ARGUMENT);
		}
		if ((new_prot & current->max_protection) != new_prot) {
			vm_map_unlock(map);
			vm_map_entry_release(count);
			return (KERN_PROTECTION_FAILURE);
		}
		current = current->next;
	}

	/*
	 * Go back and fix up protections. [Note that clipping is not
	 * necessary the second time.]
	 */
	current = entry;

	while ((current != &map->header) && (current->start < end)) {
		vm_prot_t old_prot;

		vm_map_clip_end(map, current, end, &count);

		old_prot = current->protection;
		if (set_max)
			current->protection =
			    (current->max_protection = new_prot) &
			    old_prot;
		else
			current->protection = new_prot;

		/*
		 * Update physical map if necessary. Worry about copy-on-write
		 * here -- CHECK THIS XXX
		 */

		if (current->protection != old_prot) {
#define MASK(entry)	(((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
							VM_PROT_ALL)

			pmap_protect(map->pmap, current->start,
			    current->end,
			    current->protection & MASK(current));
#undef	MASK
		}

		vm_map_simplify_entry(map, current, &count);

		current = current->next;
	}

	vm_map_unlock(map);
	vm_map_entry_release(count);
	return (KERN_SUCCESS);
}
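
/*
 * Example (illustrative; mirrors what an mprotect()-style syscall would
 * do): make [start, end) read-only without touching max_protection.
 *
 *	rv = vm_map_protect(map, start, end, VM_PROT_READ, FALSE);
 *	if (rv != KERN_SUCCESS)
 *		... translate to an errno, e.g. KERN_PROTECTION_FAILURE
 *		    becomes EACCES ...
 */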
1451
1452 /*
1453  *      vm_map_madvise:
1454  *
1455  *      This routine traverses a processes map handling the madvise
1456  *      system call.  Advisories are classified as either those effecting
1457  *      the vm_map_entry structure, or those effecting the underlying 
1458  *      objects.
1459  */
1460
1461 int
1462 vm_map_madvise(vm_map_t map, vm_offset_t start, vm_offset_t end, int behav)
1463 {
1464         vm_map_entry_t current, entry;
1465         int modify_map = 0;
1466         int count;
1467
1468         /*
1469          * Some madvise calls directly modify the vm_map_entry, in which case
1470          * we need to use an exclusive lock on the map and we need to perform 
1471          * various clipping operations.  Otherwise we only need a read-lock
1472          * on the map.
1473          */
1474
1475         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1476
1477         switch(behav) {
1478         case MADV_NORMAL:
1479         case MADV_SEQUENTIAL:
1480         case MADV_RANDOM:
1481         case MADV_NOSYNC:
1482         case MADV_AUTOSYNC:
1483         case MADV_NOCORE:
1484         case MADV_CORE:
1485                 modify_map = 1;
1486                 vm_map_lock(map);
1487                 break;
1488         case MADV_WILLNEED:
1489         case MADV_DONTNEED:
1490         case MADV_FREE:
1491                 vm_map_lock_read(map);
1492                 break;
1493         default:
1494                 vm_map_entry_release(count);
1495                 return (KERN_INVALID_ARGUMENT);
1496         }
1497
1498         /*
1499          * Locate starting entry and clip if necessary.
1500          */
1501
1502         VM_MAP_RANGE_CHECK(map, start, end);
1503
1504         if (vm_map_lookup_entry(map, start, &entry)) {
1505                 if (modify_map)
1506                         vm_map_clip_start(map, entry, start, &count);
1507         } else {
1508                 entry = entry->next;
1509         }
1510
1511         if (modify_map) {
1512                 /*
1513                  * madvise behaviors that are implemented in the vm_map_entry.
1514                  *
1515                  * We clip the vm_map_entry so that behavioral changes are
1516                  * limited to the specified address range.
1517                  */
1518                 for (current = entry;
1519                      (current != &map->header) && (current->start < end);
1520                      current = current->next
1521                 ) {
1522                         if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
1523                                 continue;
1524
1525                         vm_map_clip_end(map, current, end, &count);
1526
1527                         switch (behav) {
1528                         case MADV_NORMAL:
1529                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
1530                                 break;
1531                         case MADV_SEQUENTIAL:
1532                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
1533                                 break;
1534                         case MADV_RANDOM:
1535                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
1536                                 break;
1537                         case MADV_NOSYNC:
1538                                 current->eflags |= MAP_ENTRY_NOSYNC;
1539                                 break;
1540                         case MADV_AUTOSYNC:
1541                                 current->eflags &= ~MAP_ENTRY_NOSYNC;
1542                                 break;
1543                         case MADV_NOCORE:
1544                                 current->eflags |= MAP_ENTRY_NOCOREDUMP;
1545                                 break;
1546                         case MADV_CORE:
1547                                 current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
1548                                 break;
1549                         default:
1550                                 break;
1551                         }
1552                         vm_map_simplify_entry(map, current, &count);
1553                 }
1554                 vm_map_unlock(map);
1555         } else {
1556                 vm_pindex_t pindex;
		int count;	/* page count; shadows the reservation count above */
1558
1559                 /*
1560                  * madvise behaviors that are implemented in the underlying
1561                  * vm_object.
1562                  *
1563                  * Since we don't clip the vm_map_entry, we have to clip
1564                  * the vm_object pindex and count.
1565                  */
1566                 for (current = entry;
1567                      (current != &map->header) && (current->start < end);
1568                      current = current->next
1569                 ) {
1570                         vm_offset_t useStart;
1571
1572                         if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
1573                                 continue;
1574
1575                         pindex = OFF_TO_IDX(current->offset);
1576                         count = atop(current->end - current->start);
1577                         useStart = current->start;
1578
1579                         if (current->start < start) {
1580                                 pindex += atop(start - current->start);
1581                                 count -= atop(start - current->start);
1582                                 useStart = start;
1583                         }
1584                         if (current->end > end)
1585                                 count -= atop(current->end - end);
1586
1587                         if (count <= 0)
1588                                 continue;
1589
1590                         vm_object_madvise(current->object.vm_object,
1591                                           pindex, count, behav);
1592                         if (behav == MADV_WILLNEED) {
1593                                 pmap_object_init_pt(
1594                                     map->pmap, 
1595                                     useStart,
1596                                     current->object.vm_object,
1597                                     pindex, 
1598                                     (count << PAGE_SHIFT),
1599                                     MAP_PREFAULT_MADVISE
1600                                 );
1601                         }
1602                 }
1603                 vm_map_unlock_read(map);
1604         }
1605         vm_map_entry_release(count);
1606         return(0);
1607 }       
1608
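/*
 * Usage sketch (illustrative, not compiled): a caller such as the
 * madvise() syscall handler is expected to page-align the range before
 * calling in.  Variable names here are hypothetical.
 *
 *	vm_offset_t start = trunc_page(uaddr);
 *	vm_offset_t end = round_page(uaddr + ulen);
 *
 *	error = vm_map_madvise(&p->p_vmspace->vm_map, start, end, behav);
 *
 * Map-entry advisories (e.g. MADV_NOSYNC) take the exclusive map lock;
 * object advisories (e.g. MADV_WILLNEED) only take the read lock.
 */
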
1609
1610 /*
1611  *      vm_map_inherit:
1612  *
1613  *      Sets the inheritance of the specified address
1614  *      range in the target map.  Inheritance
1615  *      affects how the map will be shared with
1616  *      child maps at the time of vm_map_fork.
1617  */
1618 int
1619 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
1620                vm_inherit_t new_inheritance)
1621 {
1622         vm_map_entry_t entry;
1623         vm_map_entry_t temp_entry;
1624         int count;
1625
1626         switch (new_inheritance) {
1627         case VM_INHERIT_NONE:
1628         case VM_INHERIT_COPY:
1629         case VM_INHERIT_SHARE:
1630                 break;
1631         default:
1632                 return (KERN_INVALID_ARGUMENT);
1633         }
1634
1635         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1636         vm_map_lock(map);
1637
1638         VM_MAP_RANGE_CHECK(map, start, end);
1639
1640         if (vm_map_lookup_entry(map, start, &temp_entry)) {
1641                 entry = temp_entry;
1642                 vm_map_clip_start(map, entry, start, &count);
1643         } else
1644                 entry = temp_entry->next;
1645
1646         while ((entry != &map->header) && (entry->start < end)) {
1647                 vm_map_clip_end(map, entry, end, &count);
1648
1649                 entry->inheritance = new_inheritance;
1650
1651                 vm_map_simplify_entry(map, entry, &count);
1652
1653                 entry = entry->next;
1654         }
1655         vm_map_unlock(map);
1656         vm_map_entry_release(count);
1657         return (KERN_SUCCESS);
1658 }
1659
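/*
 * Usage sketch (illustrative): marking a range so that a child created
 * by vmspace_fork() shares it with the parent instead of receiving a
 * copy-on-write copy.  'map', 'start' and 'end' are assumed to be a
 * valid page-aligned user range.
 *
 *	if (vm_map_inherit(map, start, end, VM_INHERIT_SHARE) !=
 *	    KERN_SUCCESS)
 *		return (EINVAL);
 *
 * VM_INHERIT_NONE leaves a hole in the child; VM_INHERIT_COPY (the
 * default) gives the child a copy-on-write copy.
 */
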
1660 /*
1661  * Implement the semantics of mlock
1662  */
1663 int
vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t real_end,
	      boolean_t new_pageable)
{
1670         vm_map_entry_t entry;
1671         vm_map_entry_t start_entry;
1672         vm_offset_t end;
1673         int rv = KERN_SUCCESS;
1674         int count;
1675
1676         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1677         vm_map_lock(map);
1678         VM_MAP_RANGE_CHECK(map, start, real_end);
1679         end = real_end;
1680
1681         start_entry = vm_map_clip_range(map, start, end, &count, MAP_CLIP_NO_HOLES);
1682         if (start_entry == NULL) {
1683                 vm_map_unlock(map);
1684                 vm_map_entry_release(count);
1685                 return (KERN_INVALID_ADDRESS);
1686         }
1687
1688         if (new_pageable == 0) {
1689                 entry = start_entry;
1690                 while ((entry != &map->header) && (entry->start < end)) {
1691                         vm_offset_t save_start;
1692                         vm_offset_t save_end;
1693
1694                         /*
1695                          * Already user wired or hard wired (trivial cases)
1696                          */
1697                         if (entry->eflags & MAP_ENTRY_USER_WIRED) {
1698                                 entry = entry->next;
1699                                 continue;
1700                         }
1701                         if (entry->wired_count != 0) {
1702                                 entry->wired_count++;
1703                                 entry->eflags |= MAP_ENTRY_USER_WIRED;
1704                                 entry = entry->next;
1705                                 continue;
1706                         }
1707
1708                         /*
1709                          * A new wiring requires instantiation of appropriate
1710                          * management structures and the faulting in of the
1711                          * page.
1712                          */
1713                         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1714                                 int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
1715                                 if (copyflag && ((entry->protection & VM_PROT_WRITE) != 0)) {
1716
1717                                         vm_object_shadow(&entry->object.vm_object,
1718                                             &entry->offset,
1719                                             atop(entry->end - entry->start));
1720                                         entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
1721
1722                                 } else if (entry->object.vm_object == NULL &&
1723                                            !map->system_map) {
1724
1725                                         entry->object.vm_object =
1726                                             vm_object_allocate(OBJT_DEFAULT,
1727                                                 atop(entry->end - entry->start));
1728                                         entry->offset = (vm_offset_t) 0;
1729
1730                                 }
1731                         }
1732                         entry->wired_count++;
1733                         entry->eflags |= MAP_ENTRY_USER_WIRED;
1734
1735                         /*
1736                          * Now fault in the area.  The map lock needs to be
1737                          * manipulated to avoid deadlocks.  The in-transition
1738                          * flag protects the entries. 
1739                          */
1740                         save_start = entry->start;
1741                         save_end = entry->end;
1742                         vm_map_unlock(map);
1743                         map->timestamp++;
1744                         rv = vm_fault_user_wire(map, save_start, save_end);
1745                         vm_map_lock(map);
1746                         if (rv) {
1747                                 CLIP_CHECK_BACK(entry, save_start);
1748                                 for (;;) {
1749                                         KASSERT(entry->wired_count == 1, ("bad wired_count on entry"));
1750                                         entry->eflags &= ~MAP_ENTRY_USER_WIRED;
1751                                         entry->wired_count = 0;
1752                                         if (entry->end == save_end)
1753                                                 break;
1754                                         entry = entry->next;
1755                                         KASSERT(entry != &map->header, ("bad entry clip during backout"));
1756                                 }
1757                                 end = save_start;       /* unwire the rest */
1758                                 break;
1759                         }
1760                         /*
1761                          * note that even though the entry might have been
1762                          * clipped, the USER_WIRED flag we set prevents
1763                          * duplication so we do not have to do a 
1764                          * clip check.
1765                          */
1766                         entry = entry->next;
1767                 }
1768
1769                 /*
1770                  * If we failed fall through to the unwiring section to
1771                  * unwire what we had wired so far.  'end' has already
1772                  * been adjusted.
1773                  */
1774                 if (rv)
1775                         new_pageable = 1;
1776
1777                 /*
1778                  * start_entry might have been clipped if we unlocked the
1779                  * map and blocked.  No matter how clipped it has gotten
1780                  * there should be a fragment that is on our start boundary.
1781                  */
1782                 CLIP_CHECK_BACK(start_entry, start);
1783         }
1784
1785         /*
1786          * Deal with the unwiring case.
1787          */
1788         if (new_pageable) {
1789                 /*
1790                  * This is the unwiring case.  We must first ensure that the
1791                  * range to be unwired is really wired down.  We know there
1792                  * are no holes.
1793                  */
1794                 entry = start_entry;
1795                 while ((entry != &map->header) && (entry->start < end)) {
1796                         if ((entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
1797                                 rv = KERN_INVALID_ARGUMENT;
1798                                 goto done;
1799                         }
1800                         KASSERT(entry->wired_count != 0, ("wired count was 0 with USER_WIRED set! %p", entry));
1801                         entry = entry->next;
1802                 }
1803
1804                 /*
1805                  * Now decrement the wiring count for each region. If a region
1806                  * becomes completely unwired, unwire its physical pages and
1807                  * mappings.
1808                  */
		/*
		 * Note that 'entry' must be reset to start_entry here.  An
		 * earlier version of this code reused the 'entry' loop
		 * variable left over from the validation pass above, so the
		 * loop below was never entered, the pages backing the
		 * entries were never unwired, and wired pages leaked.
		 */
1819                 entry = start_entry;
1820                 while ((entry != &map->header) && (entry->start < end)) {
1821                         KASSERT(entry->eflags & MAP_ENTRY_USER_WIRED, ("expected USER_WIRED on entry %p", entry));
1822                         entry->eflags &= ~MAP_ENTRY_USER_WIRED;
1823                         entry->wired_count--;
1824                         if (entry->wired_count == 0)
1825                                 vm_fault_unwire(map, entry->start, entry->end);
1826                         entry = entry->next;
1827                 }
1828         }
1829 done:
1830         vm_map_unclip_range(map, start_entry, start, real_end, &count,
1831                 MAP_CLIP_NO_HOLES);
1832         map->timestamp++;
1833         vm_map_unlock(map);
1834         vm_map_entry_release(count);
1835         return (rv);
1836 }
1837
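/*
 * Usage sketch (illustrative): mlock()/munlock() reduce to this
 * routine.  Wiring is requested with new_pageable == 0, unwiring with
 * new_pageable != 0:
 *
 *	rv = vm_map_unwire(&p->p_vmspace->vm_map, addr,
 *			   addr + size, FALSE);		(mlock)
 *	rv = vm_map_unwire(&p->p_vmspace->vm_map, addr,
 *			   addr + size, TRUE);		(munlock)
 *
 * On a partial wiring failure the routine backs out everything it has
 * already wired before returning, so callers need not clean up.
 */
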
1838 /*
1839  *      vm_map_wire:
1840  *
1841  *      Sets the pageability of the specified address
1842  *      range in the target map.  Regions specified
1843  *      as not pageable require locked-down physical
1844  *      memory and physical page maps.
1845  *
1846  *      The map must not be locked, but a reference
1847  *      must remain to the map throughout the call.
1848  *
1849  *      This function may be called via the zalloc path and must properly
1850  *      reserve map entries for kernel_map.
1851  */
1852 int
1853 vm_map_wire(vm_map_t map, vm_offset_t start, 
1854         vm_offset_t real_end, boolean_t new_pageable)
1855 {
1856         vm_map_entry_t entry;
1857         vm_map_entry_t start_entry;
1858         vm_offset_t end;
1859         int rv = KERN_SUCCESS;
1860         int count;
1861         int s;
1862
1863         if (map == kernel_map)
1864                 count = vm_map_entry_kreserve(MAP_RESERVE_COUNT);
1865         else
1866                 count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1867         vm_map_lock(map);
1868         VM_MAP_RANGE_CHECK(map, start, real_end);
1869         end = real_end;
1870
1871         start_entry = vm_map_clip_range(map, start, end, &count, MAP_CLIP_NO_HOLES);
1872         if (start_entry == NULL) {
1873                 vm_map_unlock(map);
1874                 rv = KERN_INVALID_ADDRESS;
1875                 goto failure;
1876         }
1877         if (new_pageable == 0) {
1878                 /*
1879                  * Wiring.  
1880                  *
1881                  * 1.  Holding the write lock, we create any shadow or zero-fill
1882                  * objects that need to be created. Then we clip each map
1883                  * entry to the region to be wired and increment its wiring
1884                  * count.  We create objects before clipping the map entries
1885                  * to avoid object proliferation.
1886                  *
1887                  * 2.  We downgrade to a read lock, and call vm_fault_wire to
1888                  * fault in the pages for any newly wired area (wired_count is
1889                  * 1).
1890                  *
1891                  * Downgrading to a read lock for vm_fault_wire avoids a 
1892                  * possible deadlock with another process that may have faulted
1893                  * on one of the pages to be wired (it would mark the page busy,
1894                  * blocking us, then in turn block on the map lock that we
1895                  * hold).  Because of problems in the recursive lock package,
1896                  * we cannot upgrade to a write lock in vm_map_lookup.  Thus,
1897                  * any actions that require the write lock must be done
1898                  * beforehand.  Because we keep the read lock on the map, the
1899                  * copy-on-write status of the entries we modify here cannot
1900                  * change.
1901                  */
1902
1903                 entry = start_entry;
1904                 while ((entry != &map->header) && (entry->start < end)) {
1905                         /*
1906                          * Trivial case if the entry is already wired
1907                          */
1908                         if (entry->wired_count) {
1909                                 entry->wired_count++;
1910                                 entry = entry->next;
1911                                 continue;
1912                         }
1913
1914                         /*
			 * The entry is being newly wired; we have to set up
1916                          * appropriate management structures.  A shadow 
1917                          * object is required for a copy-on-write region,
1918                          * or a normal object for a zero-fill region.  We
1919                          * do not have to do this for entries that point to sub
1920                          * maps because we won't hold the lock on the sub map.
1921                          */
1922                         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1923                                 int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
1924                                 if (copyflag &&
1925                                     ((entry->protection & VM_PROT_WRITE) != 0)) {
1926
1927                                         vm_object_shadow(&entry->object.vm_object,
1928                                             &entry->offset,
1929                                             atop(entry->end - entry->start));
1930                                         entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
1931                                 } else if (entry->object.vm_object == NULL &&
1932                                            !map->system_map) {
1933                                         entry->object.vm_object =
1934                                             vm_object_allocate(OBJT_DEFAULT,
1935                                                 atop(entry->end - entry->start));
1936                                         entry->offset = (vm_offset_t) 0;
1937                                 }
1938                         }
1939
1940                         entry->wired_count++;
1941                         entry = entry->next;
1942                 }
1943
1944                 /*
1945                  * Pass 2.
1946                  */
1947
1948                 /*
1949                  * HACK HACK HACK HACK
1950                  *
1951                  * Unlock the map to avoid deadlocks.  The in-transit flag
1952                  * protects us from most changes but note that
1953                  * clipping may still occur.  To prevent clipping from
		 * occurring after the unlock, except when we are
1955                  * blocking in vm_fault_wire, we must run at splvm().
1956                  * Otherwise our accesses to entry->start and entry->end
1957                  * could be corrupted.  We have to set splvm() prior to
1958                  * unlocking so start_entry does not change out from
1959                  * under us at the very beginning of the loop.
1960                  *
1961                  * HACK HACK HACK HACK
1962                  */
1963
1964                 s = splvm();
1965                 vm_map_unlock(map);
1966
1967                 entry = start_entry;
1968                 while (entry != &map->header && entry->start < end) {
1969                         /*
1970                          * If vm_fault_wire fails for any page we need to undo
1971                          * what has been done.  We decrement the wiring count
1972                          * for those pages which have not yet been wired (now)
1973                          * and unwire those that have (later).
1974                          */
1975                         vm_offset_t save_start = entry->start;
1976                         vm_offset_t save_end = entry->end;
1977
1978                         if (entry->wired_count == 1)
1979                                 rv = vm_fault_wire(map, entry->start, entry->end);
1980                         if (rv) {
1981                                 CLIP_CHECK_BACK(entry, save_start);
1982                                 for (;;) {
1983                                         KASSERT(entry->wired_count == 1, ("wired_count changed unexpectedly"));
1984                                         entry->wired_count = 0;
1985                                         if (entry->end == save_end)
1986                                                 break;
1987                                         entry = entry->next;
1988                                         KASSERT(entry != &map->header, ("bad entry clip during backout"));
1989                                 }
1990                                 end = save_start;
1991                                 break;
1992                         }
1993                         CLIP_CHECK_FWD(entry, save_end);
1994                         entry = entry->next;
1995                 }
1996                 splx(s);
1997
1998                 /*
1999                  * relock.  start_entry is still IN_TRANSITION and must
2000                  * still exist, but may have been clipped (handled just
2001                  * below).
2002                  */
2003                 vm_map_lock(map);
2004
2005                 /*
		 * If a failure occurred, undo everything by falling through
2007                  * to the unwiring code.  'end' has already been adjusted
2008                  * appropriately.
2009                  */
2010                 if (rv)
2011                         new_pageable = 1;
2012
2013                 /*
2014                  * start_entry might have been clipped if we unlocked the
2015                  * map and blocked.  No matter how clipped it has gotten
2016                  * there should be a fragment that is on our start boundary.
2017                  */
2018                 CLIP_CHECK_BACK(start_entry, start);
2019         }
2020
2021         if (new_pageable) {
2022                 /*
2023                  * This is the unwiring case.  We must first ensure that the
2024                  * range to be unwired is really wired down.  We know there
2025                  * are no holes.
2026                  */
2027                 entry = start_entry;
2028                 while ((entry != &map->header) && (entry->start < end)) {
2029                         if (entry->wired_count == 0) {
2030                                 rv = KERN_INVALID_ARGUMENT;
2031                                 goto done;
2032                         }
2033                         entry = entry->next;
2034                 }
2035
2036                 /*
2037                  * Now decrement the wiring count for each region. If a region
2038                  * becomes completely unwired, unwire its physical pages and
2039                  * mappings.
2040                  */
2041                 entry = start_entry;
2042                 while ((entry != &map->header) && (entry->start < end)) {
2043                         entry->wired_count--;
2044                         if (entry->wired_count == 0)
2045                                 vm_fault_unwire(map, entry->start, entry->end);
2046                         entry = entry->next;
2047                 }
2048         }
2049 done:
2050         vm_map_unclip_range(map, start_entry, start, real_end, &count,
2051                 MAP_CLIP_NO_HOLES);
2052         map->timestamp++;
2053         vm_map_unlock(map);
2054 failure:
2055         if (map == kernel_map)
2056                 vm_map_entry_krelease(count);
2057         else
2058                 vm_map_entry_release(count);
2059         return (rv);
2060 }
2061
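/*
 * Usage sketch (illustrative): wiring a kernel virtual range.  Users
 * of kernel_map should come through this entry point so that the
 * kreserve/krelease path is taken and the entry reservation does not
 * recurse back into the allocator:
 *
 *	rv = vm_map_wire(kernel_map, addr, addr + size, FALSE);
 *	...
 *	rv = vm_map_wire(kernel_map, addr, addr + size, TRUE);
 *
 * As in the user wiring path above, a fault failure in mid-range backs
 * out the partially wired region before returning.
 */
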
2062 /*
2063  * vm_map_set_wired_quick()
2064  *
2065  *      Mark a newly allocated address range as wired but do not fault in
2066  *      the pages.  The caller is expected to load the pages into the object.
2067  *
2068  *      The map must be locked on entry and will remain locked on return.
2069  */
2070 void
2071 vm_map_set_wired_quick(vm_map_t map, vm_offset_t addr, vm_size_t size, int *countp)
2072 {
2073         vm_map_entry_t scan;
2074         vm_map_entry_t entry;
2075
2076         entry = vm_map_clip_range(map, addr, addr + size, countp, MAP_CLIP_NO_HOLES);
	for (scan = entry;
	     scan != &map->header && scan->start < addr + size;
	     scan = scan->next) {
		KKASSERT(scan->wired_count == 0);
		scan->wired_count = 1;
	}
2081         vm_map_unclip_range(map, entry, addr, addr + size, countp, MAP_CLIP_NO_HOLES);
2082 }
2083
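/*
 * Usage sketch (illustrative): a caller that has just inserted a range
 * into a locked map and intends to populate the pages itself might do:
 *
 *	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
 *	vm_map_lock(map);
 *	... vm_map_insert(map, &count, ...) ...
 *	vm_map_set_wired_quick(map, addr, size, &count);
 *	vm_map_unlock(map);
 *	vm_map_entry_release(count);
 *
 * No faulting is performed here; the backing pages must be entered
 * into the object by the caller.
 */
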
2084 /*
2085  * vm_map_clean
2086  *
2087  * Push any dirty cached pages in the address range to their pager.
2088  * If syncio is TRUE, dirty pages are written synchronously.
2089  * If invalidate is TRUE, any cached pages are freed as well.
2090  *
2091  * Returns an error if any part of the specified range is not mapped.
2092  */
2093 int
vm_map_clean(vm_map_t map, vm_offset_t start, vm_offset_t end,
	     boolean_t syncio, boolean_t invalidate)
{
2101         vm_map_entry_t current;
2102         vm_map_entry_t entry;
2103         vm_size_t size;
2104         vm_object_t object;
2105         vm_ooffset_t offset;
2106
2107         vm_map_lock_read(map);
2108         VM_MAP_RANGE_CHECK(map, start, end);
2109         if (!vm_map_lookup_entry(map, start, &entry)) {
2110                 vm_map_unlock_read(map);
2111                 return (KERN_INVALID_ADDRESS);
2112         }
2113         /*
2114          * Make a first pass to check for holes.
2115          */
2116         for (current = entry; current->start < end; current = current->next) {
2117                 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
2118                         vm_map_unlock_read(map);
2119                         return (KERN_INVALID_ARGUMENT);
2120                 }
2121                 if (end > current->end &&
2122                     (current->next == &map->header ||
2123                         current->end != current->next->start)) {
2124                         vm_map_unlock_read(map);
2125                         return (KERN_INVALID_ADDRESS);
2126                 }
2127         }
2128
2129         if (invalidate)
2130                 pmap_remove(vm_map_pmap(map), start, end);
2131         /*
2132          * Make a second pass, cleaning/uncaching pages from the indicated
2133          * objects as we go.
2134          */
2135         for (current = entry; current->start < end; current = current->next) {
2136                 offset = current->offset + (start - current->start);
2137                 size = (end <= current->end ? end : current->end) - start;
2138                 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
2139                         vm_map_t smap;
2140                         vm_map_entry_t tentry;
2141                         vm_size_t tsize;
2142
2143                         smap = current->object.sub_map;
2144                         vm_map_lock_read(smap);
2145                         (void) vm_map_lookup_entry(smap, offset, &tentry);
2146                         tsize = tentry->end - offset;
2147                         if (tsize < size)
2148                                 size = tsize;
2149                         object = tentry->object.vm_object;
2150                         offset = tentry->offset + (offset - tentry->start);
2151                         vm_map_unlock_read(smap);
2152                 } else {
2153                         object = current->object.vm_object;
2154                 }
2155                 /*
2156                  * Note that there is absolutely no sense in writing out
2157                  * anonymous objects, so we track down the vnode object
2158                  * to write out.
2159                  * We invalidate (remove) all pages from the address space
2160                  * anyway, for semantic correctness.
2161                  *
2162                  * note: certain anonymous maps, such as MAP_NOSYNC maps,
2163                  * may start out with a NULL object.
2164                  */
2165                 while (object && object->backing_object) {
2166                         object = object->backing_object;
2167                         offset += object->backing_object_offset;
2168                         if (object->size < OFF_TO_IDX( offset + size))
2169                                 size = IDX_TO_OFF(object->size) - offset;
2170                 }
2171                 if (object && (object->type == OBJT_VNODE) && 
2172                     (current->protection & VM_PROT_WRITE)) {
2173                         /*
2174                          * Flush pages if writing is allowed, invalidate them
2175                          * if invalidation requested.  Pages undergoing I/O
2176                          * will be ignored by vm_object_page_remove().
2177                          *
2178                          * We cannot lock the vnode and then wait for paging
2179                          * to complete without deadlocking against vm_fault.
2180                          * Instead we simply call vm_object_page_remove() and
2181                          * allow it to block internally on a page-by-page 
2182                          * basis when it encounters pages undergoing async 
2183                          * I/O.
2184                          */
2185                         int flags;
2186
2187                         vm_object_reference(object);
2188                         vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY, curthread);
2189                         flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
2190                         flags |= invalidate ? OBJPC_INVAL : 0;
2191                         vm_object_page_clean(object,
2192                             OFF_TO_IDX(offset),
2193                             OFF_TO_IDX(offset + size + PAGE_MASK),
2194                             flags);
2195                         VOP_UNLOCK(object->handle, 0, curthread);
2196                         vm_object_deallocate(object);
2197                 }
2198                 if (object && invalidate &&
2199                    ((object->type == OBJT_VNODE) ||
2200                     (object->type == OBJT_DEVICE))) {
2201                         vm_object_reference(object);
2202                         vm_object_page_remove(object,
2203                             OFF_TO_IDX(offset),
2204                             OFF_TO_IDX(offset + size + PAGE_MASK),
2205                             FALSE);
2206                         vm_object_deallocate(object);
2207                 }
2208                 start += size;
2209         }
2210
2211         vm_map_unlock_read(map);
2212         return (KERN_SUCCESS);
2213 }
2214
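/*
 * Usage sketch (illustrative): an msync(MS_SYNC | MS_INVALIDATE) style
 * caller would pass page-aligned bounds:
 *
 *	rv = vm_map_clean(map, trunc_page(addr),
 *			  round_page(addr + len), TRUE, TRUE);
 *
 * syncio (or invalidate) forces synchronous writes (OBJPC_SYNC);
 * invalidate additionally sets OBJPC_INVAL and removes any cached
 * pages from the range.
 */
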
2215 /*
2216  *      vm_map_entry_unwire:    [ internal use only ]
2217  *
2218  *      Make the region specified by this entry pageable.
2219  *
2220  *      The map in question should be locked.
2221  *      [This is the reason for this routine's existence.]
2222  */
2223 static void 
2224 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
2225 {
2226         vm_fault_unwire(map, entry->start, entry->end);
2227         entry->wired_count = 0;
2228 }
2229
2230 /*
2231  *      vm_map_entry_delete:    [ internal use only ]
2232  *
2233  *      Deallocate the given entry from the target map.
2234  */
2235 static void
2236 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry, int *countp)
2237 {
2238         vm_map_entry_unlink(map, entry);
2239         map->size -= entry->end - entry->start;
2240
2241         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
2242                 vm_object_deallocate(entry->object.vm_object);
2243         }
2244
2245         vm_map_entry_dispose(map, entry, countp);
2246 }
2247
2248 /*
2249  *      vm_map_delete:  [ internal use only ]
2250  *
2251  *      Deallocates the given address range from the target
2252  *      map.
2253  */
2254 int
2255 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end, int *countp)
2256 {
2257         vm_object_t object;
2258         vm_map_entry_t entry;
2259         vm_map_entry_t first_entry;
2260
2261         /*
2262          * Find the start of the region, and clip it
2263          */
2264
2265 again:
2266         if (!vm_map_lookup_entry(map, start, &first_entry))
2267                 entry = first_entry->next;
2268         else {
2269                 entry = first_entry;
2270                 vm_map_clip_start(map, entry, start, countp);
2271                 /*
2272                  * Fix the lookup hint now, rather than each time though the
2273                  * loop.
2274                  */
2275                 SAVE_HINT(map, entry->prev);
2276         }
2277
2278         /*
2279          * Save the free space hint
2280          */
2281
2282         if (entry == &map->header) {
2283                 map->first_free = &map->header;
2284         } else if (map->first_free->start >= start) {
2285                 map->first_free = entry->prev;
2286         }
2287
2288         /*
2289          * Step through all entries in this region
2290          */
2291
2292         while ((entry != &map->header) && (entry->start < end)) {
2293                 vm_map_entry_t next;
2294                 vm_offset_t s, e;
2295                 vm_pindex_t offidxstart, offidxend, count;
2296
2297                 /*
2298                  * If we hit an in-transition entry we have to sleep and
2299                  * retry.  It's easier (and not really slower) to just retry
2300                  * since this case occurs so rarely and the hint is already
2301                  * pointing at the right place.  We have to reset the
		 * start offset so as not to accidentally delete an entry
2303                  * another process just created in vacated space.
2304                  */
2305                 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
2306                         entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2307                         start = entry->start;
2308                         ++mycpu->gd_cnt.v_intrans_coll;
2309                         ++mycpu->gd_cnt.v_intrans_wait;
2310                         vm_map_transition_wait(map);
2311                         goto again;
2312                 }
2313                 vm_map_clip_end(map, entry, end, countp);
2314
2315                 s = entry->start;
2316                 e = entry->end;
2317                 next = entry->next;
2318
2319                 offidxstart = OFF_TO_IDX(entry->offset);
2320                 count = OFF_TO_IDX(e - s);
2321                 object = entry->object.vm_object;
2322
2323                 /*
2324                  * Unwire before removing addresses from the pmap; otherwise,
2325                  * unwiring will put the entries back in the pmap.
2326                  */
2327                 if (entry->wired_count != 0) {
2328                         vm_map_entry_unwire(map, entry);
2329                 }
2330
2331                 offidxend = offidxstart + count;
2332
2333                 if ((object == kernel_object) || (object == kmem_object)) {
2334                         vm_object_page_remove(object, offidxstart, offidxend, FALSE);
2335                 } else {
2336                         pmap_remove(map->pmap, s, e);
2337                         if (object != NULL &&
2338                             object->ref_count != 1 &&
2339                             (object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING &&
2340                             (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
2341                                 vm_object_collapse(object);
2342                                 vm_object_page_remove(object, offidxstart, offidxend, FALSE);
2343                                 if (object->type == OBJT_SWAP) {
2344                                         swap_pager_freespace(object, offidxstart, count);
2345                                 }
2346                                 if (offidxend >= object->size &&
2347                                     offidxstart < object->size) {
2348                                         object->size = offidxstart;
2349                                 }
2350                         }
2351                 }
2352
2353                 /*
2354                  * Delete the entry (which may delete the object) only after
2355                  * removing all pmap entries pointing to its pages.
2356                  * (Otherwise, its page frames may be reallocated, and any
2357                  * modify bits will be set in the wrong object!)
2358                  */
2359                 vm_map_entry_delete(map, entry, countp);
2360                 entry = next;
2361         }
2362         return (KERN_SUCCESS);
2363 }
2364
2365 /*
2366  *      vm_map_remove:
2367  *
2368  *      Remove the given address range from the target map.
2369  *      This is the exported form of vm_map_delete.
2370  */
2371 int
2372 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
2373 {
2374         int result;
2375         int count;
2376
2377         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
2378         vm_map_lock(map);
2379         VM_MAP_RANGE_CHECK(map, start, end);
2380         result = vm_map_delete(map, start, end, &count);
2381         vm_map_unlock(map);
2382         vm_map_entry_release(count);
2383
2384         return (result);
2385 }
2386
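/*
 * Usage sketch (illustrative): munmap() style teardown of a user
 * range.  The range is clamped by VM_MAP_RANGE_CHECK internally, so a
 * caller only needs page-aligned bounds:
 *
 *	rv = vm_map_remove(&p->p_vmspace->vm_map,
 *			   trunc_page(addr), round_page(addr + len));
 *
 * The reservation and locking dance is handled here, which is why
 * internal code that already holds the map lock calls vm_map_delete()
 * directly instead.
 */
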
2387 /*
2388  *      vm_map_check_protection:
2389  *
2390  *      Assert that the target map allows the specified
2391  *      privilege on the entire address region given.
2392  *      The entire region must be allocated.
2393  */
2394 boolean_t
2395 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
2396                         vm_prot_t protection)
2397 {
2398         vm_map_entry_t entry;
2399         vm_map_entry_t tmp_entry;
2400
2401         if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
2402                 return (FALSE);
2403         }
2404         entry = tmp_entry;
2405
2406         while (start < end) {
2407                 if (entry == &map->header) {
2408                         return (FALSE);
2409                 }
2410                 /*
2411                  * No holes allowed!
2412                  */
2413
2414                 if (start < entry->start) {
2415                         return (FALSE);
2416                 }
2417                 /*
2418                  * Check protection associated with entry.
2419                  */
2420
2421                 if ((entry->protection & protection) != protection) {
2422                         return (FALSE);
2423                 }
2424                 /* go to next entry */
2425
2426                 start = entry->end;
2427                 entry = entry->next;
2428         }
2429         return (TRUE);
2430 }
2431
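/*
 * Usage sketch (illustrative): verifying that a user buffer is
 * readable before starting an operation on it.  Note that the check
 * is only advisory unless the caller holds the map lock across the
 * subsequent access:
 *
 *	if (!vm_map_check_protection(&p->p_vmspace->vm_map,
 *	    trunc_page(addr), round_page(addr + len), VM_PROT_READ))
 *		return (EFAULT);
 */
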
2432 /*
2433  * Split the pages in a map entry into a new object.  This affords
2434  * easier removal of unused pages, and keeps object inheritance from
2435  * being a negative impact on memory usage.
2436  */
2437 static void
2438 vm_map_split(vm_map_entry_t entry)
2439 {
2440         vm_page_t m;
2441         vm_object_t orig_object, new_object, source;
2442         vm_offset_t s, e;
2443         vm_pindex_t offidxstart, offidxend, idx;
2444         vm_size_t size;
2445         vm_ooffset_t offset;
2446
2447         orig_object = entry->object.vm_object;
2448         if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP)
2449                 return;
2450         if (orig_object->ref_count <= 1)
2451                 return;
2452
2453         offset = entry->offset;
2454         s = entry->start;
2455         e = entry->end;
2456
2457         offidxstart = OFF_TO_IDX(offset);
2458         offidxend = offidxstart + OFF_TO_IDX(e - s);
2459         size = offidxend - offidxstart;
2460
2461         new_object = vm_pager_allocate(orig_object->type,
2462                 NULL, IDX_TO_OFF(size), VM_PROT_ALL, 0LL);
2463         if (new_object == NULL)
2464                 return;
2465
2466         source = orig_object->backing_object;
2467         if (source != NULL) {
2468                 vm_object_reference(source);    /* Referenced by new_object */
2469                 LIST_INSERT_HEAD(&source->shadow_head,
2470                                   new_object, shadow_list);
2471                 vm_object_clear_flag(source, OBJ_ONEMAPPING);
2472                 new_object->backing_object_offset = 
2473                         orig_object->backing_object_offset + IDX_TO_OFF(offidxstart);
2474                 new_object->backing_object = source;
2475                 source->shadow_count++;
2476                 source->generation++;
2477         }
2478
2479         for (idx = 0; idx < size; idx++) {
2480                 vm_page_t m;
2481
2482         retry:
2483                 m = vm_page_lookup(orig_object, offidxstart + idx);
2484                 if (m == NULL)
2485                         continue;
2486
2487                 /*
2488                  * We must wait for pending I/O to complete before we can
2489                  * rename the page.
2490                  *
2491                  * We do not have to VM_PROT_NONE the page as mappings should
2492                  * not be changed by this operation.
2493                  */
2494                 if (vm_page_sleep_busy(m, TRUE, "spltwt"))
2495                         goto retry;
2496                         
2497                 vm_page_busy(m);
2498                 vm_page_rename(m, new_object, idx);
2499                 /* page automatically made dirty by rename and cache handled */
2500                 vm_page_busy(m);
2501         }
2502
2503         if (orig_object->type == OBJT_SWAP) {
2504                 vm_object_pip_add(orig_object, 1);
2505                 /*
2506                  * copy orig_object pages into new_object
2507                  * and destroy unneeded pages in
2508                  * shadow object.
2509                  */
2510                 swap_pager_copy(orig_object, new_object, offidxstart, 0);
2511                 vm_object_pip_wakeup(orig_object);
2512         }
2513
2514         for (idx = 0; idx < size; idx++) {
2515                 m = vm_page_lookup(new_object, idx);
2516                 if (m) {
2517                         vm_page_wakeup(m);
2518                 }
2519         }
2520
2521         entry->object.vm_object = new_object;
2522         entry->offset = 0LL;
2523         vm_object_deallocate(orig_object);
2524 }
2525
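/*
 * Worked example (sketch) of the index arithmetic above, assuming 4K
 * pages: for an entry covering [s, e) = [0x2000, 0x5000) at offset
 * 0x1000 into orig_object,
 *
 *	offidxstart = OFF_TO_IDX(0x1000) = 1
 *	offidxend   = 1 + OFF_TO_IDX(0x3000) = 4
 *	size        = 3 pages
 *
 * so pages 1..3 of orig_object become pages 0..2 of new_object, and
 * the entry's offset is reset to 0.
 */
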
2526 /*
2527  *      vm_map_copy_entry:
2528  *
2529  *      Copies the contents of the source entry to the destination
2530  *      entry.  The entries *must* be aligned properly.
2531  */
2532 static void
2533 vm_map_copy_entry(vm_map_t src_map, vm_map_t dst_map,
2534         vm_map_entry_t src_entry, vm_map_entry_t dst_entry)
2535 {
2536         vm_object_t src_object;
2537
2538         if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP)
2539                 return;
2540
2541         if (src_entry->wired_count == 0) {
2542
2543                 /*
2544                  * If the source entry is marked needs_copy, it is already
2545                  * write-protected.
2546                  */
2547                 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
2548                         pmap_protect(src_map->pmap,
2549                             src_entry->start,
2550                             src_entry->end,
2551                             src_entry->protection & ~VM_PROT_WRITE);
2552                 }
2553
2554                 /*
2555                  * Make a copy of the object.
2556                  */
2557                 if ((src_object = src_entry->object.vm_object) != NULL) {
2558
2559                         if ((src_object->handle == NULL) &&
2560                                 (src_object->type == OBJT_DEFAULT ||
2561                                  src_object->type == OBJT_SWAP)) {
2562                                 vm_object_collapse(src_object);
2563                                 if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) {
2564                                         vm_map_split(src_entry);
2565                                         src_object = src_entry->object.vm_object;
2566                                 }
2567                         }
2568
2569                         vm_object_reference(src_object);
2570                         vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
2571                         dst_entry->object.vm_object = src_object;
2572                         src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
2573                         dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
2574                         dst_entry->offset = src_entry->offset;
2575                 } else {
2576                         dst_entry->object.vm_object = NULL;
2577                         dst_entry->offset = 0;
2578                 }
2579
2580                 pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
2581                     dst_entry->end - dst_entry->start, src_entry->start);
2582         } else {
2583                 /*
2584                  * Of course, wired down pages can't be set copy-on-write.
2585                  * Cause wired pages to be copied into the new map by
2586                  * simulating faults (the new pages are pageable)
2587                  */
2588                 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
2589         }
2590 }
2591
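/*
 * Sketch of the resulting COW state: after copying an unwired entry,
 * both maps reference the same object write-protected, with
 *
 *	src_entry->eflags, dst_entry->eflags
 *		both including MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY
 *
 * so the first write fault in either map shadows the object for that
 * map only (see vm_fault).
 */
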
2592 /*
2593  * vmspace_fork:
2594  * Create a new process vmspace structure and vm_map
2595  * based on those of an existing process.  The new map
2596  * is based on the old map, according to the inheritance
2597  * values on the regions in that map.
2598  *
2599  * The source map must not be locked.
2600  */
2601 struct vmspace *
2602 vmspace_fork(struct vmspace *vm1)
2603 {
2604         struct vmspace *vm2;
2605         vm_map_t old_map = &vm1->vm_map;
2606         vm_map_t new_map;
2607         vm_map_entry_t old_entry;
2608         vm_map_entry_t new_entry;
2609         vm_object_t object;
2610         int count;
2611
2612         vm_map_lock(old_map);
2613         old_map->infork = 1;
2614
2615         vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset);
2616         bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
2617             (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy);
2618         new_map = &vm2->vm_map; /* XXX */
2619         new_map->timestamp = 1;
2620
2621         count = 0;
2622         old_entry = old_map->header.next;
2623         while (old_entry != &old_map->header) {
2624                 ++count;
2625                 old_entry = old_entry->next;
2626         }
2627
2628         count = vm_map_entry_reserve(count + MAP_RESERVE_COUNT);
2629
2630         old_entry = old_map->header.next;
2631         while (old_entry != &old_map->header) {
2632                 if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
2633                         panic("vm_map_fork: encountered a submap");
2634
2635                 switch (old_entry->inheritance) {
2636                 case VM_INHERIT_NONE:
2637                         break;
2638
2639                 case VM_INHERIT_SHARE:
2640                         /*
2641                          * Clone the entry, creating the shared object if necessary.
2642                          */
2643                         object = old_entry->object.vm_object;
2644                         if (object == NULL) {
2645                                 object = vm_object_allocate(OBJT_DEFAULT,
2646                                         atop(old_entry->end - old_entry->start));
2647                                 old_entry->object.vm_object = object;
2648                                 old_entry->offset = (vm_offset_t) 0;
2649                         }
2650
2651                         /*
2652                          * Add the reference before calling vm_object_shadow
			 * to ensure that a shadow object is created.
2654                          */
2655                         vm_object_reference(object);
2656                         if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
2657                                 vm_object_shadow(&old_entry->object.vm_object,
2658                                         &old_entry->offset,
2659                                         atop(old_entry->end - old_entry->start));
2660                                 old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
2661                                 /* Transfer the second reference too. */
2662                                 vm_object_reference(
2663                                     old_entry->object.vm_object);
2664                                 vm_object_deallocate(object);
2665                                 object = old_entry->object.vm_object;
2666                         }
2667                         vm_object_clear_flag(object, OBJ_ONEMAPPING);
2668
2669                         /*
2670                          * Clone the entry, referencing the shared object.
2671                          */
2672                         new_entry = vm_map_entry_create(new_map, &count);
2673                         *new_entry = *old_entry;
2674                         new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2675                         new_entry->wired_count = 0;
2676
2677                         /*
2678                          * Insert the entry into the new map -- we know we're
2679                          * inserting at the end of the new map.
2680                          */
2681
2682                         vm_map_entry_link(new_map, new_map->header.prev,
2683                             new_entry);
2684
2685                         /*
2686                          * Update the physical map
2687                          */
2688
2689                         pmap_copy(new_map->pmap, old_map->pmap,
2690                             new_entry->start,
2691                             (old_entry->end - old_entry->start),
2692                             old_entry->start);
2693                         break;
2694
2695                 case VM_INHERIT_COPY:
2696                         /*
2697                          * Clone the entry and link into the map.
2698                          */
2699                         new_entry = vm_map_entry_create(new_map, &count);
2700                         *new_entry = *old_entry;
2701                         new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2702                         new_entry->wired_count = 0;
2703                         new_entry->object.vm_object = NULL;
2704                         vm_map_entry_link(new_map, new_map->header.prev,
2705                             new_entry);
2706                         vm_map_copy_entry(old_map, new_map, old_entry,
2707                             new_entry);
2708                         break;
2709                 }
2710                 old_entry = old_entry->next;
2711         }
2712
2713         new_map->size = old_map->size;
2714         old_map->infork = 0;
2715         vm_map_unlock(old_map);
2716         vm_map_entry_release(count);
2717
2718         return (vm2);
2719 }
2720
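/*
 * Usage sketch (illustrative): a fork path would typically hang the
 * new vmspace off the child process, e.g.:
 *
 *	p2->p_vmspace = vmspace_fork(p1->p_vmspace);
 *
 * pmap_copy() pre-loads the child's pmap for unwired entries; entries
 * inherited by copy are marked COW so that the first write fault
 * creates a private copy (wired entries are copied immediately
 * instead).
 */
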
2721 int
2722 vm_map_stack (vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
2723               vm_prot_t prot, vm_prot_t max, int cow)
2724 {
2725         vm_map_entry_t prev_entry;
2726         vm_map_entry_t new_stack_entry;
2727         vm_size_t      init_ssize;
2728         int            rv;
2729         int             count;
2730
2731         if (VM_MIN_ADDRESS > 0 && addrbos < VM_MIN_ADDRESS)
2732                 return (KERN_NO_SPACE);
2733
2734         if (max_ssize < sgrowsiz)
2735                 init_ssize = max_ssize;
2736         else
2737                 init_ssize = sgrowsiz;
2738
2739         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
2740         vm_map_lock(map);
2741
2742         /* If addr is already mapped, no go */
2743         if (vm_map_lookup_entry(map, addrbos, &prev_entry)) {
2744                 vm_map_unlock(map);
2745                 vm_map_entry_release(count);
2746                 return (KERN_NO_SPACE);
2747         }
2748
2749         /* If we would blow our VMEM resource limit, no go */
2750         if (map->size + init_ssize >
2751             curproc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
2752                 vm_map_unlock(map);
2753                 vm_map_entry_release(count);
2754                 return (KERN_NO_SPACE);
2755         }
2756
	/* If we can't accommodate max_ssize in the current mapping,
2758          * no go.  However, we need to be aware that subsequent user
2759          * mappings might map into the space we have reserved for
2760          * stack, and currently this space is not protected.  
2761          * 
2762          * Hopefully we will at least detect this condition 
2763          * when we try to grow the stack.
2764          */
2765         if ((prev_entry->next != &map->header) &&
2766             (prev_entry->next->start < addrbos + max_ssize)) {
2767                 vm_map_unlock(map);
2768                 vm_map_entry_release(count);
2769                 return (KERN_NO_SPACE);
2770         }
2771
2772         /* We initially map a stack of only init_ssize.  We will
2773          * grow as needed later.  Since this is to be a grow 
2774          * down stack, we map at the top of the range.
2775          *
2776          * Note: we would normally expect prot and max to be
2777          * VM_PROT_ALL, and cow to be 0.  Possibly we should
2778          * eliminate these as input parameters, and just
2779          * pass these values here in the insert call.
2780          */
2781         rv = vm_map_insert(map, &count,
2782                            NULL, 0, addrbos + max_ssize - init_ssize,
2783                            addrbos + max_ssize, prot, max, cow);
2784
2785         /* Now set the avail_ssize amount */
2786         if (rv == KERN_SUCCESS) {
2787                 if (prev_entry != &map->header)
2788                         vm_map_clip_end(map, prev_entry, addrbos + max_ssize - init_ssize, &count);
2789                 new_stack_entry = prev_entry->next;
2790                 if (new_stack_entry->end   != addrbos + max_ssize ||
2791                     new_stack_entry->start != addrbos + max_ssize - init_ssize)
2792                         panic("Bad entry start/end for new stack entry");
2793                 else
2794                         new_stack_entry->avail_ssize = max_ssize - init_ssize;
2795         }
2796
2797         vm_map_unlock(map);
2798         vm_map_entry_release(count);
2799         return (rv);
2800 }
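     /*
      * Example (hypothetical caller; "vm" and "addrbos" are assumed to
      * be set up by the caller), reserving a 1MB grow-down stack:
      *
      *      if (vm_map_stack(&vm->vm_map, addrbos, 1024 * 1024,
      *          VM_PROT_ALL, VM_PROT_ALL, 0) != KERN_SUCCESS)
      *              return (ENOMEM);
      */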
2801
2802 /* Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if the
2803  * desired address is already mapped, or if we successfully grow
2804  * the stack.  Also returns KERN_SUCCESS if addr is outside the
2805  * stack range (this is strange, but preserves compatibility with
2806  * the grow function in vm_machdep.c).
2807  */
2808 int
2809 vm_map_growstack(struct proc *p, vm_offset_t addr)
2810 {
2811         vm_map_entry_t prev_entry;
2812         vm_map_entry_t stack_entry;
2813         vm_map_entry_t new_stack_entry;
2814         struct vmspace *vm = p->p_vmspace;
2815         vm_map_t map = &vm->vm_map;
2816         vm_offset_t    end;
2817         int grow_amount;
2818         int rv = KERN_SUCCESS;
2819         int is_procstack;
2820         int use_read_lock = 1;
2821         int count;
2822
2823         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
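             /*
              * Start with a shared (read) lock and upgrade to an exclusive
              * lock only when a modification is required.  A failed upgrade
              * loses the lock entirely, so in that case we retry the whole
              * operation holding the exclusive lock from the start.
              */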
2824 Retry:
2825         if (use_read_lock)
2826                 vm_map_lock_read(map);
2827         else
2828                 vm_map_lock(map);
2829
2830         /* If addr is already in the entry range, no need to grow. */
2831         if (vm_map_lookup_entry(map, addr, &prev_entry))
2832                 goto done;
2833
2834         if ((stack_entry = prev_entry->next) == &map->header)
2835                 goto done;
2836         if (prev_entry == &map->header) 
2837                 end = stack_entry->start - stack_entry->avail_ssize;
2838         else
2839                 end = prev_entry->end;
2840
2841         /* This next test mimics the old grow function in vm_machdep.c.
2842          * It doesn't quite make sense, but we do it anyway for
2843          * compatibility.
2844          *
2845          * If the stack is not growable, return success.  This signals
2846          * the caller to proceed as it normally would with ordinary vm.
2847          */
2848         if (stack_entry->avail_ssize < 1 ||
2849             addr >= stack_entry->start ||
2850             addr <  stack_entry->start - stack_entry->avail_ssize) {
2851                 goto done;
2852         } 
2853         
2854         /* Find the minimum grow amount */
2855         grow_amount = roundup(stack_entry->start - addr, PAGE_SIZE);
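             /*
              * e.g. a fault one byte below stack_entry->start rounds up to
              * PAGE_SIZE; a fault exactly five pages below yields five pages.
              */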
2856         if (grow_amount > stack_entry->avail_ssize) {
2857                 rv = KERN_NO_SPACE;
2858                 goto done;
2859         }
2860
2861         /* If there is no longer enough space between the entries,
2862          * fail and adjust the available space.  Note: this should
2863          * only happen if the user has mapped into the stack area
2864          * after the stack was created, and is probably an error.
2865          *
2866          * This also effectively destroys any guard page the user
2867          * might have intended by limiting the stack size.
2868          */
2870         if (grow_amount > stack_entry->start - end) {
2871                 if (use_read_lock && vm_map_lock_upgrade(map)) {
2872                         use_read_lock = 0;
2873                         goto Retry;
2874                 }
2875                 use_read_lock = 0;
2876                 stack_entry->avail_ssize = stack_entry->start - end;
2877                 rv = KERN_NO_SPACE;
2878                 goto done;
2879         }
2880
2881         is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr;
2882
2883         /* If this is the main process stack, see if we're over the 
2884          * stack limit.
2885          */
2886         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
2887                              p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
2888                 rv = KERN_NO_SPACE;
2889                 goto done;
2890         }
2891
2892         /* Round up the grow amount to a multiple of sgrowsiz. */
2893         grow_amount = roundup(grow_amount, sgrowsiz);
2894         if (grow_amount > stack_entry->avail_ssize) {
2895                 grow_amount = stack_entry->avail_ssize;
2896         }
2897         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
2898                              p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
2899                 grow_amount = p->p_rlimit[RLIMIT_STACK].rlim_cur -
2900                               ctob(vm->vm_ssize);
2901         }
2902
2903         /* If we would blow our VMEM resource limit, no go */
2904         if (map->size + grow_amount > p->p_rlimit[RLIMIT_VMEM].rlim_cur) {
2905                 rv = KERN_NO_SPACE;
2906                 goto done;
2907         }
2908
2909         if (use_read_lock && vm_map_lock_upgrade(map)) {
2910                 use_read_lock = 0;
2911                 goto Retry;
2912         }
2913         use_read_lock = 0;
2914
2915         /* Get the preliminary new entry start value */
2916         addr = stack_entry->start - grow_amount;
2917
2918         /* If this puts us into the previous entry, cut back our growth
2919          * to the available space.  Also, see the note above.
2920          */
2921         if (addr < end) {
2922                 stack_entry->avail_ssize = stack_entry->start - end;
2923                 addr = end;
2924         }
2925
2926         rv = vm_map_insert(map, &count,
2927                            NULL, 0, addr, stack_entry->start,
2928                            VM_PROT_ALL,
2929                            VM_PROT_ALL,
2930                            0);
2931
2932         /* Adjust the available stack space by the amount we grew. */
2933         if (rv == KERN_SUCCESS) {
2934                 if (prev_entry != &map->header)
2935                         vm_map_clip_end(map, prev_entry, addr, &count);
2936                 new_stack_entry = prev_entry->next;
2937                 if (new_stack_entry->end   != stack_entry->start  ||
2938                     new_stack_entry->start != addr)
2939                         panic("Bad stack grow start/end in new stack entry");
2940                 else {
2941                         new_stack_entry->avail_ssize = stack_entry->avail_ssize -
2942                                                         (new_stack_entry->end -
2943                                                          new_stack_entry->start);
2944                         if (is_procstack)
2945                                 vm->vm_ssize += btoc(new_stack_entry->end -
2946                                                      new_stack_entry->start);
2947                 }
2948         }
2949
2950 done:
2951         if (use_read_lock)
2952                 vm_map_unlock_read(map);
2953         else
2954                 vm_map_unlock(map);
2955         vm_map_entry_release(count);
2956         return (rv);
2957 }
2958
2959 /*
2960  * Unshare the specified VM space for exec.  A new, empty vmspace always
2961  * replaces the old one, which is freed once its last reference is gone.
2962  */
2963
2964 void
2965 vmspace_exec(struct proc *p) 
2966 {
2967         struct vmspace *oldvmspace = p->p_vmspace;
2968         struct vmspace *newvmspace;
2969         vm_map_t map = &p->p_vmspace->vm_map;
2970
2971         newvmspace = vmspace_alloc(map->min_offset, map->max_offset);
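             /*
              * Copy everything from vm_startcopy through the end of the
              * structure (the size/address bookkeeping fields) into the
              * new vmspace.
              */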
2972         bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy,
2973             (caddr_t) (newvmspace + 1) - (caddr_t) &newvmspace->vm_startcopy);
2974         /*
2975          * This code is written like this for prototype purposes.  The
2976          * goal is to avoid running down the vmspace here, but to let
2977          * the other processes still using it run it down finally.
2978          * Even though there is little or no chance of blocking here,
2979          * it is a good idea to keep this form for future mods.
2980          */
2981         vmspace_free(oldvmspace);
2982         p->p_vmspace = newvmspace;
2983         pmap_pinit2(vmspace_pmap(newvmspace));
2984         if (p == curproc)
2985                 pmap_activate(p);
2986 }
2987
2988 /*
2989  * Unshare the specified VM space for forcing COW.  This
2990  * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
2991  */
2992
2993 void
2994 vmspace_unshare(struct proc *p) 
2995 {
2996         struct vmspace *oldvmspace = p->p_vmspace;
2997         struct vmspace *newvmspace;
2998
2999         if (oldvmspace->vm_refcnt == 1)
3000                 return;
3001         newvmspace = vmspace_fork(oldvmspace);
3002         vmspace_free(oldvmspace);
3003         p->p_vmspace = newvmspace;
3004         pmap_pinit2(vmspace_pmap(newvmspace));
3005         if (p == curproc)
3006                 pmap_activate(p);
3007 }
3008
3009 /*
3010  *      vm_map_lookup:
3011  *
3012  *      Finds the VM object, offset, and
3013  *      protection for a given virtual address in the
3014  *      specified map, assuming a page fault of the
3015  *      type specified.
3016  *
3017  *      Leaves the map in question locked for read; return
3018  *      values are guaranteed until a vm_map_lookup_done
3019  *      call is performed.  Note that the map argument
3020  *      is in/out; the returned map must be used in
3021  *      the call to vm_map_lookup_done.
3022  *
3023  *      A handle (out_entry) is returned for use in
3024  *      vm_map_lookup_done, to make that fast.
3025  *
3026  *      If a lookup is requested with "write protection"
3027  *      specified, the map may be changed to perform virtual
3028  *      copying operations, although the data referenced will
3029  *      remain the same.
3030  */
3031 int
3032 vm_map_lookup(vm_map_t *var_map,                /* IN/OUT */
3033               vm_offset_t vaddr,
3034               vm_prot_t fault_typea,
3035               vm_map_entry_t *out_entry,        /* OUT */
3036               vm_object_t *object,              /* OUT */
3037               vm_pindex_t *pindex,              /* OUT */
3038               vm_prot_t *out_prot,              /* OUT */
3039               boolean_t *wired)                 /* OUT */
3040 {
3041         vm_map_entry_t entry;
3042         vm_map_t map = *var_map;
3043         vm_prot_t prot;
3044         vm_prot_t fault_type = fault_typea;
3045         int use_read_lock = 1;
3046         int rv = KERN_SUCCESS;
3047
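             /*
              * As in vm_map_growstack(), take a read lock first and upgrade
              * to an exclusive lock only when a modification is needed; a
              * failed upgrade drops the lock, so we retry from the top.
              */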
3048 RetryLookup:
3049         if (use_read_lock)
3050                 vm_map_lock_read(map);
3051         else
3052                 vm_map_lock(map);
3053
3054         /*
3055          * If the map has an interesting hint, try it before calling the
3056          * full-blown lookup routine.
3057          */
3058         entry = map->hint;
3059         *out_entry = entry;
3060
3061         if ((entry == &map->header) ||
3062             (vaddr < entry->start) || (vaddr >= entry->end)) {
3063                 vm_map_entry_t tmp_entry;
3064
3065                 /*
3066                  * Entry was either not a valid hint, or the vaddr was not
3067                  * contained in the entry, so do a full lookup.
3068                  */
3069                 if (!vm_map_lookup_entry(map, vaddr, &tmp_entry)) {
3070                         rv = KERN_INVALID_ADDRESS;
3071                         goto done;
3072                 }
3073
3074                 entry = tmp_entry;
3075                 *out_entry = entry;
3076         }
3077         
3078         /*
3079          * Handle submaps: the lookup restarts in the submap's own map.
3080          */
3081
3082         if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
3083                 vm_map_t old_map = map;
3084
3085                 *var_map = map = entry->object.sub_map;
3086                 if (use_read_lock)
3087                         vm_map_unlock_read(old_map);
3088                 else
3089                         vm_map_unlock(old_map);
3090                 use_read_lock = 1;
3091                 goto RetryLookup;
3092         }
3093
3094         /*
3095          * Check whether this task is allowed to have this page.
3096          * Note the special case for MAP_ENTRY_COW
3097          * pages with an override.  This is to implement a forced
3098          * COW for debuggers.
3099          */
3100
3101         if (fault_type & VM_PROT_OVERRIDE_WRITE)
3102                 prot = entry->max_protection;
3103         else
3104                 prot = entry->protection;
3105
3106         fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
3107         if ((fault_type & prot) != fault_type) {
3108                 rv = KERN_PROTECTION_FAILURE;
3109                 goto done;
3110         }
3111
3112         if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
3113             (entry->eflags & MAP_ENTRY_COW) &&
3114             (fault_type & VM_PROT_WRITE) &&
3115             (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) {
3116                 rv = KERN_PROTECTION_FAILURE;
3117                 goto done;
3118         }
3119
3120         /*
3121          * If this page is not pageable, we have to get it for all possible
3122          * accesses.
3123          */
3124
3125         *wired = (entry->wired_count != 0);
3126         if (*wired)
3127                 prot = fault_type = entry->protection;
3128
3129         /*
3130          * If the entry is copy-on-write: shadow on write, demote on read.
3131          */
3132
3133         if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
3134                 /*
3135                  * If we want to write the page, we may as well handle that
3136                  * now since we've got the map locked.
3137                  *
3138                  * If we don't need to write the page, we just demote the
3139                  * permissions allowed.
3140                  */
3141
3142                 if (fault_type & VM_PROT_WRITE) {
3143                         /*
3144                          * Make a new object, and place it in the object
3145                          * chain.  Note that no new references have appeared
3146                          * -- one just moved from the map to the new
3147                          * object.
3148                          */
3149
3150                         if (use_read_lock && vm_map_lock_upgrade(map)) {
3151                                 use_read_lock = 0;
3152                                 goto RetryLookup;
3153                         }
3154                         use_read_lock = 0;
3155
3156                         vm_object_shadow(
3157                             &entry->object.vm_object,
3158                             &entry->offset,
3159                             atop(entry->end - entry->start));
3160
3161                         entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
3162                 } else {
3163                         /*
3164                          * We're attempting to read a copy-on-write page --
3165                          * don't allow writes.
3166                          */
3167
3168                         prot &= ~VM_PROT_WRITE;
3169                 }
3170         }
3171
3172         /*
3173          * Create an object if necessary.
3174          */
3175         if (entry->object.vm_object == NULL &&
3176             !map->system_map) {
3177                 if (use_read_lock && vm_map_lock_upgrade(map))  {
3178                         use_read_lock = 0;
3179                         goto RetryLookup;
3180                 }
3181                 use_read_lock = 0;
3182                 entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
3183                     atop(entry->end - entry->start));
3184                 entry->offset = 0;
3185         }
3186
3187         /*
3188          * Return the object/offset from this entry.  If the entry was
3189          * copy-on-write or empty, it has been fixed up.
3190          */
3191
3192         *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
3193         *object = entry->object.vm_object;
3194
3195         /*
3196          * Return the computed protection.  On
3197          * success we return with a read lock held on the map.  On failure
3198          * we return with the map unlocked.
3199          */
3200         *out_prot = prot;
3201 done:
3202         if (rv == KERN_SUCCESS) {
3203                 if (use_read_lock == 0)
3204                         vm_map_lock_downgrade(map);
3205         } else if (use_read_lock) {
3206                 vm_map_unlock_read(map);
3207         } else {
3208                 vm_map_unlock(map);
3209         }
3210         return (rv);
3211 }
3212
3213 /*
3214  *      vm_map_lookup_done:
3215  *
3216  *      Releases locks acquired by a vm_map_lookup
3217  *      (according to the handle returned by that lookup).
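      *
      *      The count argument, when non-zero, releases a map entry
      *      reservation made earlier with vm_map_entry_reserve().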
3218  */
3219
3220 void
3221 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry, int count)
3222 {
3223         /*
3224          * Unlock the main-level map
3225          */
3226         vm_map_unlock_read(map);
3227         if (count)
3228                 vm_map_entry_release(count);
3229 }
3230
3231 /*
3232  * Implement uiomove with VM operations.  This handles (and its collateral
3233  * changes support) every combination of source object modification and
3234  * COW type operation.
3235  */
3236 int
3237 vm_uiomove(vm_map_t mapa, vm_object_t srcobject, off_t cp, int cnta,
3238     vm_offset_t uaddra, int *npages)
3239 {
3245         vm_map_t map;
3246         vm_object_t first_object, oldobject, object;
3247         vm_map_entry_t entry;
3248         vm_prot_t prot;
3249         boolean_t wired;
3250         int tcnt, rv;
3251         vm_offset_t uaddr, start, end, tend;
3252         vm_pindex_t first_pindex, osize, oindex;
3253         off_t ooffset;
3254         int cnt;
3255         int count;
3256
3257         if (npages)
3258                 *npages = 0;
3259
3260         cnt = cnta;
3261         uaddr = uaddra;
3262
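             /*
              * Process the request one map entry at a time.  Each chunk is
              * handled in one of three ways: redirect an existing vnode
              * window at the source object, splice the source object under
              * a small anonymous object, or replace the entry with a fresh
              * COW mapping of the source object (a logical mmap).
              */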
3263         while (cnt > 0) {
3264                 map = mapa;
3265
3266                 count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
3267
3268                 if (vm_map_lookup(&map, uaddr, VM_PROT_READ, &entry,
3269                     &first_object, &first_pindex, &prot, &wired) != KERN_SUCCESS) {
3270                         vm_map_entry_release(count); /* don't leak the reserve */
3271                         return EFAULT;
3272                 }
3273
3274                 vm_map_clip_start(map, entry, uaddr, &count);
3275
3276                 tcnt = cnt;
3277                 tend = uaddr + tcnt;
3278                 if (tend > entry->end) {
3279                         tcnt = entry->end - uaddr;
3280                         tend = entry->end;
3281                 }
3282
3283                 vm_map_clip_end(map, entry, tend, &count);
3284
3285                 start = entry->start;
3286                 end = entry->end;
3287
3288                 osize = atop(tcnt);
3289
3290                 oindex = OFF_TO_IDX(cp);
3291                 if (npages) {
3292                         vm_pindex_t idx;
3293                         for (idx = 0; idx < osize; idx++) {
3294                                 vm_page_t m;
3295                                 if ((m = vm_page_lookup(srcobject, oindex + idx)) == NULL) {
3296                                         vm_map_lookup_done(map, entry, count);
3297                                         return 0;
3298                                 }
3299                                 /*
3300                                  * disallow busy or invalid pages, but allow
3301                                  * m->busy pages if they are entirely valid.
3302                                  */
3303                                 if ((m->flags & PG_BUSY) ||
3304                                         ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL)) {
3305                                         vm_map_lookup_done(map, entry, count);
3306                                         return 0;
3307                                 }
3308                         }
3309                 }
3310
3311                 /*
3312                  * If we are changing an existing map entry, just redirect
3313                  * the object, and change mappings.
3314                  */
3315                 if ((first_object->type == OBJT_VNODE) &&
3316                         ((oldobject = entry->object.vm_object) == first_object)) {
3317
3318                         if ((entry->offset != cp) || (oldobject != srcobject)) {
3319                                 /*
3320                                  * Remove old window into the file.
3321                                  */
3322                                 pmap_remove(map->pmap, uaddr, tend);
3323
3324                                 /*
3325                                  * Force copy-on-write for mmap()ed regions.
3326                                  */
3327                                 vm_object_pmap_copy_1(srcobject, oindex, oindex + osize);
3328
3329                                 /*
3330                                  * Point the object appropriately.
3331                                  */
3332                                 if (oldobject != srcobject) {
3333
3334                                         /*
3335                                          * Set the object optimization hint flag.
3336                                          */
3337                                         vm_object_set_flag(srcobject, OBJ_OPT);
3338                                         vm_object_reference(srcobject);
3339                                         entry->object.vm_object = srcobject;
3340
3341                                         if (oldobject) {
3342                                                 vm_object_deallocate(oldobject);
3343                                         }
3344                                 }
3345
3346                                 entry->offset = cp;
3347                                 map->timestamp++;
3348                         } else {
3349                                 pmap_remove(map->pmap, uaddr, tend);
3350                         }
3351
3352                 } else if ((first_object->ref_count == 1) &&
3353                     (first_object->size == osize) &&
3354                     ((first_object->type == OBJT_DEFAULT) ||
3355                      (first_object->type == OBJT_SWAP))) {
3356
3357                         oldobject = first_object->backing_object;
3358
3359                         if ((first_object->backing_object_offset != cp) ||
3360                                 (oldobject != srcobject)) {
3361                                 /*
3362                                  * Remove old window into the file.
3363                                  */
3364                                 pmap_remove(map->pmap, uaddr, tend);
3365
3366                                 /*
3367                                  * Remove unneeded old pages
3368                                  */
3369                                 vm_object_page_remove(first_object, 0, 0, 0);
3370
3371                                 /*
3372                                  * Invalidate swap space
3373                                  */
3374                                 if (first_object->type == OBJT_SWAP) {
3375                                         swap_pager_freespace(first_object,
3376                                                 0,
3377                                                 first_object->size);
3378                                 }
3379
3380                                 /*
3381                                  * Force copy-on-write for mmap()ed regions.
3382                                  */
3383                                 vm_object_pmap_copy_1(srcobject, oindex, oindex + osize);
3384
3385                                 /*
3386                                  * Point the object appropriately.
3387                                  */
3388                                 if (oldobject != srcobject) {
3389
3390                                         /*
3391                                          * Set the object optimization hint flag.
3392                                          */
3393                                         vm_object_set_flag(srcobject, OBJ_OPT);
3394                                         vm_object_reference(srcobject);
3395
3396                                         if (oldobject) {
3397                                                 LIST_REMOVE(
3398                                                         first_object, shadow_list);
3399                                                 oldobject->shadow_count--;
3400                                                 /* XXX bump generation? */
3401                                                 vm_object_deallocate(oldobject);
3402                                         }
3403
3404                                         LIST_INSERT_HEAD(&srcobject->shadow_head,
3405                                                 first_object, shadow_list);
3406                                         srcobject->shadow_count++;
3407                                         /* XXX bump generation? */
3408
3409                                         first_object->backing_object = srcobject;
3410                                 }
3411                                 first_object->backing_object_offset = cp;
3412                                 map->timestamp++;
3413                         } else {
3414                                 pmap_remove(map->pmap, uaddr, tend);
3415                         }
3416                 /*
3417                  * Otherwise, we have to do a logical mmap.
3418                  */
3419                 } else {
3420
3421                         vm_object_set_flag(srcobject, OBJ_OPT);
3422                         vm_object_reference(srcobject);
3423
3424                         pmap_remove(map->pmap, uaddr, tend);
3425
3426                         vm_object_pmap_copy_1(srcobject, oindex, oindex + osize);
3427                         vm_map_lock_upgrade(map);
3428
3429                         if (entry == &map->header) {
3430                                 map->first_free = &map->header;
3431                         } else if (map->first_free->start >= start) {
3432                                 map->first_free = entry->prev;
3433                         }
3434
3435                         SAVE_HINT(map, entry->prev);
3436                         vm_map_entry_delete(map, entry, &count);
3437
3438                         object = srcobject;
3439                         ooffset = cp;
3440
3441                         rv = vm_map_insert(map, &count,
3442                                 object, ooffset, start, tend,
3443                                 VM_PROT_ALL, VM_PROT_ALL, MAP_COPY_ON_WRITE);
3444
3445                         if (rv != KERN_SUCCESS)
3446                                 panic("vm_uiomove: could not insert new entry: %d", rv);
3447                 }
3448
3449                 /*
3450                  * Map the window directly, if it is already in memory.
3451                  */
3452                 pmap_object_init_pt(map->pmap, uaddr,
3453                         srcobject, oindex, tcnt, 0);
3454
3455                 map->timestamp++;
3456                 vm_map_unlock(map);
3457                 vm_map_entry_release(count);
3458
3459                 cnt -= tcnt;
3460                 uaddr += tcnt;
3461                 cp += tcnt;
3462                 if (npages)
3463                         *npages += osize;
3464         }
3465         return 0;
3466 }
3467
3468 /*
3469  * Performs the copy-on-write operations necessary to allow the virtual copies
3470  * into user space to work.  This has to be called for write(2) system calls
3471  * from other processes, file unlinking, and file size shrinkage.
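      * Note: the froma/toa range arguments are currently not consulted;
      * every shadow object of the given object is processed.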
3472  */
3473 void
3474 vm_freeze_copyopts(vm_object_t object, vm_pindex_t froma, vm_pindex_t toa)
3475 {
3478         int rv;
3479         vm_object_t robject;
3480         vm_pindex_t idx;
3481
3482         if ((object == NULL) ||
3483                 ((object->flags & OBJ_OPT) == 0))
3484                 return;
3485
3486         if (object->shadow_count > object->ref_count)
3487                 panic("vm_freeze_copyopts: sc > rc");
3488
3489         while ((robject = LIST_FIRST(&object->shadow_head)) != NULL) {
3490                 vm_pindex_t bo_pindex;
3491                 vm_page_t m_in, m_out;
3492
3493                 bo_pindex = OFF_TO_IDX(robject->backing_object_offset);
3494
3495                 vm_object_reference(robject);
3496
3497                 vm_object_pip_wait(robject, "objfrz");
3498
3499                 if (robject->ref_count == 1) {
3500                         vm_object_deallocate(robject);
3501                         continue;
3502                 }
3503
3504                 vm_object_pip_add(robject, 1);
3505
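                     /*
                      * For each page in the shadow object that has no valid
                      * private copy yet, copy the data in from the backing
                      * object so the shadow no longer depends on it.
                      */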
3506                 for (idx = 0; idx < robject->size; idx++) {
3507
3508                         m_out = vm_page_grab(robject, idx,
3509                                                 VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
3510
3511                         if (m_out->valid == 0) {
3512                                 m_in = vm_page_grab(object, bo_pindex + idx,
3513                                                 VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
3514                                 if (m_in->valid == 0) {
3515                                         rv = vm_pager_get_pages(object, &m_in, 1, 0);
3516                                         if (rv != VM_PAGER_OK) {
3517                                                 printf("vm_freeze_copyopts: cannot read page from file: %lx\n", (long)m_in->pindex);
3518                                                 continue;
3519                                         }
3520                                         vm_page_deactivate(m_in);
3521                                 }
3522
3523                                 vm_page_protect(m_in, VM_PROT_NONE);
3524                                 pmap_copy_page(VM_PAGE_TO_PHYS(m_in), VM_PAGE_TO_PHYS(m_out));
3525                                 m_out->valid = m_in->valid;
3526                                 vm_page_dirty(m_out);
3527                                 vm_page_activate(m_out);
3528                                 vm_page_wakeup(m_in);
3529                         }
3530                         vm_page_wakeup(m_out);
3531                 }
3532
3533                 object->shadow_count--;
3534                 object->ref_count--;
3535                 LIST_REMOVE(robject, shadow_list);
3536                 robject->backing_object = NULL;
3537                 robject->backing_object_offset = 0;
3538
3539                 vm_object_pip_wakeup(robject);
3540                 vm_object_deallocate(robject);
3541         }
3542
3543         vm_object_clear_flag(object, OBJ_OPT);
3544 }
3545
3546 #include "opt_ddb.h"
3547 #ifdef DDB
3548 #include <sys/kernel.h>
3549
3550 #include <ddb/ddb.h>
3551
3552 /*
3553  *      vm_map_print:   [ debug ]
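      *
      *      Invoked from the DDB prompt as "show map <addr>".  Giving an
      *      explicit address also selects full output (full = have_addr).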
3554  */
3555 DB_SHOW_COMMAND(map, vm_map_print)
3556 {
3557         static int nlines;
3558         /* XXX convert args. */
3559         vm_map_t map = (vm_map_t)addr;
3560         boolean_t full = have_addr;
3561
3562         vm_map_entry_t entry;
3563
3564         db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
3565             (void *)map,
3566             (void *)map->pmap, map->nentries, map->timestamp);
3567         nlines++;
3568
3569         if (!full && db_indent)
3570                 return;
3571
3572         db_indent += 2;
3573         for (entry = map->header.next; entry != &map->header;
3574             entry = entry->next) {
3575                 db_iprintf("map entry %p: start=%p, end=%p\n",
3576                     (void *)entry, (void *)entry->start, (void *)entry->end);
3577                 nlines++;
3578                 {
3579                         static const char *inheritance_name[4] =
3580                             {"share", "copy", "none", "donate_copy"};
3581
3582                         db_iprintf(" prot=%x/%x/%s",
3583                             entry->protection,
3584                             entry->max_protection,
3585                             inheritance_name[(int)(unsigned char)entry->inheritance]);
3586                         if (entry->wired_count != 0)
3587                                 db_printf(", wired");
3588                 }
3589                 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
3590                         /* XXX no %qd in kernel.  Truncate entry->offset. */
3591                         db_printf(", share=%p, offset=0x%lx\n",
3592                             (void *)entry->object.sub_map,
3593                             (long)entry->offset);
3594                         nlines++;
3595                         if ((entry->prev == &map->header) ||
3596                             (entry->prev->object.sub_map !=
3597                                 entry->object.sub_map)) {
3598                                 db_indent += 2;
3599                                 vm_map_print((db_expr_t)(intptr_t)
3600                                              entry->object.sub_map,
3601                                              full, 0, (char *)0);
3602                                 db_indent -= 2;
3603                         }
3604                 } else {
3605                         /* XXX no %qd in kernel.  Truncate entry->offset. */
3606                         db_printf(", object=%p, offset=0x%lx",
3607                             (void *)entry->object.vm_object,
3608                             (long)entry->offset);
3609                         if (entry->eflags & MAP_ENTRY_COW)
3610                                 db_printf(", copy (%s)",
3611                                     (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
3612                         db_printf("\n");
3613                         nlines++;
3614
3615                         if ((entry->prev == &map->header) ||
3616                             (entry->prev->object.vm_object !=
3617                                 entry->object.vm_object)) {
3618                                 db_indent += 2;
3619                                 vm_object_print((db_expr_t)(intptr_t)
3620                                                 entry->object.vm_object,
3621                                                 full, 0, (char *)0);
3622                                 nlines += 4;
3623                                 db_indent -= 2;
3624                         }
3625                 }
3626         }
3627         db_indent -= 2;
3628         if (db_indent == 0)
3629                 nlines = 0;
3630 }
3631
3632
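     /*
      * "show procvm [<addr of struct proc>]" - print the vmspace, map,
      * and pmap of the given process (curproc if no address is given).
      */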
3633 DB_SHOW_COMMAND(procvm, procvm)
3634 {
3635         struct proc *p;
3636
3637         if (have_addr) {
3638                 p = (struct proc *) addr;
3639         } else {
3640                 p = curproc;
3641         }
3642
3643         db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
3644             (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
3645             (void *)vmspace_pmap(p->p_vmspace));
3646
3647         vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL);
3648 }
3649
3650 #endif /* DDB */