kernel -- vm_map: Remove infork field from vm_map.
1 /*
2  * (MPSAFE)
3  *
4  * Copyright (c) 1991, 1993
5  *      The Regents of the University of California.  All rights reserved.
6  *
7  * This code is derived from software contributed to Berkeley by
8  * The Mach Operating System project at Carnegie-Mellon University.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *      This product includes software developed by the University of
21  *      California, Berkeley and its contributors.
22  * 4. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  *      from: @(#)vm_map.c      8.3 (Berkeley) 1/12/94
39  *
40  *
41  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
42  * All rights reserved.
43  *
44  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
45  *
46  * Permission to use, copy, modify and distribute this software and
47  * its documentation is hereby granted, provided that both the copyright
48  * notice and this permission notice appear in all copies of the
49  * software, derivative works or modified versions, and any portions
50  * thereof, and that both notices appear in supporting documentation.
51  *
52  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
53  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
54  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
55  *
56  * Carnegie Mellon requests users of this software to return to
57  *
58  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
59  *  School of Computer Science
60  *  Carnegie Mellon University
61  *  Pittsburgh PA 15213-3890
62  *
63  * any improvements or extensions that they make and grant Carnegie the
64  * rights to redistribute these changes.
65  *
66  * $FreeBSD: src/sys/vm/vm_map.c,v 1.187.2.19 2003/05/27 00:47:02 alc Exp $
67  * $DragonFly: src/sys/vm/vm_map.c,v 1.56 2007/04/29 18:25:41 dillon Exp $
68  */
69
70 /*
71  *      Virtual memory mapping module.
72  */
73
74 #include <sys/param.h>
75 #include <sys/systm.h>
76 #include <sys/kernel.h>
77 #include <sys/proc.h>
78 #include <sys/serialize.h>
79 #include <sys/lock.h>
80 #include <sys/vmmeter.h>
81 #include <sys/mman.h>
82 #include <sys/vnode.h>
83 #include <sys/resourcevar.h>
84 #include <sys/shm.h>
85 #include <sys/tree.h>
86 #include <sys/malloc.h>
87
88 #include <vm/vm.h>
89 #include <vm/vm_param.h>
90 #include <vm/pmap.h>
91 #include <vm/vm_map.h>
92 #include <vm/vm_page.h>
93 #include <vm/vm_object.h>
94 #include <vm/vm_pager.h>
95 #include <vm/vm_kern.h>
96 #include <vm/vm_extern.h>
97 #include <vm/swap_pager.h>
98 #include <vm/vm_zone.h>
99
100 #include <sys/thread2.h>
101 #include <sys/sysref2.h>
102 #include <sys/random.h>
103 #include <sys/sysctl.h>
104
105 /*
106  * Virtual memory maps provide for the mapping, protection, and sharing
107  * of virtual memory objects.  In addition, this module provides for an
108  * efficient virtual copy of memory from one map to another.
109  *
110  * Synchronization is required prior to most operations.
111  *
112  * Maps consist of an ordered doubly-linked list of simple entries.
113  * A hint and an RB tree are used to speed up lookups.
114  *
115  * Callers looking to modify maps specify start/end addresses which cause
116  * the related map entry to be clipped if necessary, and then later
117  * recombined if the pieces remain compatible.
118  *
119  * Virtual copy operations are performed by copying VM object references
120  * from one map to another, and then marking both regions as copy-on-write.
121  */
122 static void vmspace_terminate(struct vmspace *vm);
123 static void vmspace_lock(struct vmspace *vm);
124 static void vmspace_unlock(struct vmspace *vm);
125 static void vmspace_dtor(void *obj, void *private);
126
127 MALLOC_DEFINE(M_VMSPACE, "vmspace", "vmspace objcache backingstore");
128
129 struct sysref_class vmspace_sysref_class = {
130         .name =         "vmspace",
131         .mtype =        M_VMSPACE,
132         .proto =        SYSREF_PROTO_VMSPACE,
133         .offset =       offsetof(struct vmspace, vm_sysref),
134         .objsize =      sizeof(struct vmspace),
135         .mag_capacity = 32,
136         .flags = SRC_MANAGEDINIT,
137         .dtor = vmspace_dtor,
138         .ops = {
139                 .terminate = (sysref_terminate_func_t)vmspace_terminate,
140                 .lock = (sysref_lock_func_t)vmspace_lock,
141                 .unlock = (sysref_lock_func_t)vmspace_unlock
142         }
143 };
144
145 /*
146  * per-cpu page table cross mappings are initialized in early boot
147  * and might require a considerable number of vm_map_entry structures.
148  */
149 #define VMEPERCPU       (MAXCPU+1)
150
151 static struct vm_zone mapentzone_store, mapzone_store;
152 static vm_zone_t mapentzone, mapzone;
153 static struct vm_object mapentobj, mapobj;
154
155 static struct vm_map_entry map_entry_init[MAX_MAPENT];
156 static struct vm_map_entry cpu_map_entry_init[MAXCPU][VMEPERCPU];
157 static struct vm_map map_init[MAX_KMAP];
158
159 static int randomize_mmap;
160 SYSCTL_INT(_vm, OID_AUTO, randomize_mmap, CTLFLAG_RW, &randomize_mmap, 0,
161     "Randomize mmap offsets");
162
163 static void vm_map_entry_shadow(vm_map_entry_t entry);
164 static vm_map_entry_t vm_map_entry_create(vm_map_t map, int *);
165 static void vm_map_entry_dispose (vm_map_t map, vm_map_entry_t entry, int *);
166 static void _vm_map_clip_end (vm_map_t, vm_map_entry_t, vm_offset_t, int *);
167 static void _vm_map_clip_start (vm_map_t, vm_map_entry_t, vm_offset_t, int *);
168 static void vm_map_entry_delete (vm_map_t, vm_map_entry_t, int *);
169 static void vm_map_entry_unwire (vm_map_t, vm_map_entry_t);
170 static void vm_map_copy_entry (vm_map_t, vm_map_t, vm_map_entry_t,
171                 vm_map_entry_t);
172 static void vm_map_split (vm_map_entry_t);
173 static void vm_map_unclip_range (vm_map_t map, vm_map_entry_t start_entry, vm_offset_t start, vm_offset_t end, int *count, int flags);
174
175 /*
176  * Initialize the vm_map module.  Must be called before any other vm_map
177  * routines.
178  *
179  * Map and entry structures are allocated from the general purpose
180  * memory pool with some exceptions:
181  *
182  *      - The kernel map is allocated statically.
183  *      - Initial kernel map entries are allocated out of a static pool.
184  *
185  *      These restrictions are necessary since malloc() uses the
186  *      maps and requires map entries.
187  *
188  * Called from the low level boot code only.
189  */
190 void
191 vm_map_startup(void)
192 {
193         mapzone = &mapzone_store;
194         zbootinit(mapzone, "MAP", sizeof (struct vm_map),
195                 map_init, MAX_KMAP);
196         mapentzone = &mapentzone_store;
197         zbootinit(mapentzone, "MAP ENTRY", sizeof (struct vm_map_entry),
198                 map_entry_init, MAX_MAPENT);
199 }
200
201 /*
202  * Called prior to any vmspace allocations.
203  *
204  * Called from the low level boot code only.
205  */
206 void
207 vm_init2(void) 
208 {
209         zinitna(mapentzone, &mapentobj, NULL, 0, 0, 
210                 ZONE_USE_RESERVE | ZONE_SPECIAL, 1);
211         zinitna(mapzone, &mapobj, NULL, 0, 0, 0, 1);
212         pmap_init2();
213         vm_object_init2();
214 }
215
216
217 /*
218  * Red black tree functions
219  *
220  * The caller must hold the related map lock.
221  */
222 static int rb_vm_map_compare(vm_map_entry_t a, vm_map_entry_t b);
223 RB_GENERATE(vm_map_rb_tree, vm_map_entry, rb_entry, rb_vm_map_compare);
224
225 /* a->start is the address; it is the only field that has to be initialized */
226 static int
227 rb_vm_map_compare(vm_map_entry_t a, vm_map_entry_t b)
228 {
229         if (a->start < b->start)
230                 return(-1);
231         else if (a->start > b->start)
232                 return(1);
233         return(0);
234 }
235
236 /*
237  * Allocate a vmspace structure, including a vm_map and pmap.
238  * Initialize numerous fields.  While the initial allocation is zeroed,
239  * subsequent reuse from the objcache leaves elements of the structure
240  * intact (particularly the pmap), so portions must be zeroed.
241  *
242  * The structure is not considered activated until we call sysref_activate().
243  *
244  * No requirements.
245  */
246 struct vmspace *
247 vmspace_alloc(vm_offset_t min, vm_offset_t max)
248 {
249         struct vmspace *vm;
250
251         lwkt_gettoken(&vmspace_token);
252         vm = sysref_alloc(&vmspace_sysref_class);
253         bzero(&vm->vm_startcopy,
254               (char *)&vm->vm_endcopy - (char *)&vm->vm_startcopy);
255         vm_map_init(&vm->vm_map, min, max, NULL);
256         pmap_pinit(vmspace_pmap(vm));           /* (some fields reused) */
257         vm->vm_map.pmap = vmspace_pmap(vm);             /* XXX */
258         vm->vm_shm = NULL;
259         vm->vm_exitingcnt = 0;
260         cpu_vmspace_alloc(vm);
261         sysref_activate(&vm->vm_sysref);
262         lwkt_reltoken(&vmspace_token);
263
264         return (vm);
265 }
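
/*
 * Illustrative sketch (not part of the original source): a typical
 * caller allocates a user vmspace spanning the standard user bounds
 * and eventually drops its reference through the sysref API.  Error
 * handling and the surrounding process setup are omitted; the exact
 * release path shown is an assumption.
 *
 *	struct vmspace *vm;
 *
 *	vm = vmspace_alloc(VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
 *	... use &vm->vm_map and vmspace_pmap(vm) ...
 *	sysref_put(&vm->vm_sysref);
 */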
266
267 /*
268  * dtor function - Some elements of the pmap are retained in the
269  * free-cached vmspaces to improve performance.  We have to clean them up
270  * here before returning the vmspace to the memory pool.
271  *
272  * No requirements.
273  */
274 static void
275 vmspace_dtor(void *obj, void *private)
276 {
277         struct vmspace *vm = obj;
278
279         pmap_puninit(vmspace_pmap(vm));
280 }
281
282 /*
283  * Called in two cases: 
284  *
285  * (1) When the last sysref is dropped, but exitingcnt might still be
286  *     non-zero.
287  *
288  * (2) When there are no sysrefs (i.e. refcnt is negative) left and the
289  *     exitingcnt becomes zero
290  *
291  * sysref will not scrap the object until we call sysref_put() once more
292  * after the last ref has been dropped.
293  *
294  * Interlocked by the sysref API.
295  */
296 static void
297 vmspace_terminate(struct vmspace *vm)
298 {
299         int count;
300
301         /*
302          * If exitingcnt is non-zero we can't get rid of the entire vmspace
303          * yet, but we can scrap user memory.
304          */
305         lwkt_gettoken(&vmspace_token);
306         if (vm->vm_exitingcnt) {
307                 shmexit(vm);
308                 pmap_remove_pages(vmspace_pmap(vm), VM_MIN_USER_ADDRESS,
309                                   VM_MAX_USER_ADDRESS);
310                 vm_map_remove(&vm->vm_map, VM_MIN_USER_ADDRESS,
311                               VM_MAX_USER_ADDRESS);
312                 lwkt_reltoken(&vmspace_token);
313                 return;
314         }
315         cpu_vmspace_free(vm);
316
317         /*
318          * Make sure any SysV shm is freed; it might not have been
319          * freed in exit1().
320          */
321         shmexit(vm);
322
323         KKASSERT(vm->vm_upcalls == NULL);
324
325         /*
326          * Lock the map, to wait out all other references to it.
327          * Delete all of the mappings and pages they hold, then call
328          * the pmap module to reclaim anything left.
329          */
330         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
331         vm_map_lock(&vm->vm_map);
332         vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
333                 vm->vm_map.max_offset, &count);
334         vm_map_unlock(&vm->vm_map);
335         vm_map_entry_release(count);
336
337         pmap_release(vmspace_pmap(vm));
338         sysref_put(&vm->vm_sysref);
339         lwkt_reltoken(&vmspace_token);
340 }
341
342 /*
343  * vmspaces are not currently locked.
344  */
345 static void
346 vmspace_lock(struct vmspace *vm __unused)
347 {
348 }
349
350 static void
351 vmspace_unlock(struct vmspace *vm __unused)
352 {
353 }
354
355 /*
356  * This is called during exit indicating that the vmspace is no
357  * longer in use by an exiting process, but the process has not yet
358  * been cleaned up.
359  *
360  * No requirements.
361  */
362 void
363 vmspace_exitbump(struct vmspace *vm)
364 {
365         lwkt_gettoken(&vmspace_token);
366         ++vm->vm_exitingcnt;
367         lwkt_reltoken(&vmspace_token);
368 }
369
370 /*
371  * This is called in the wait*() handling code.  The vmspace can be terminated
372  * after the last wait is finished using it.
373  *
374  * No requirements.
375  */
376 void
377 vmspace_exitfree(struct proc *p)
378 {
379         struct vmspace *vm;
380
381         lwkt_gettoken(&vmspace_token);
382         vm = p->p_vmspace;
383         p->p_vmspace = NULL;
384
385         if (--vm->vm_exitingcnt == 0 && sysref_isinactive(&vm->vm_sysref))
386                 vmspace_terminate(vm);
387         lwkt_reltoken(&vmspace_token);
388 }
389
390 /*
391  * Swap usage is determined by taking the proportional swap used by
392  * VM objects backing the VM map.  To make up for fractional losses,
393  * if the VM object has any swap use at all, the associated map entries
394  * count for at least 1 swap page.
395  *
396  * No requirements.
397  */
398 int
399 vmspace_swap_count(struct vmspace *vmspace)
400 {
401         vm_map_t map = &vmspace->vm_map;
402         vm_map_entry_t cur;
403         vm_object_t object;
404         int count = 0;
405         int n;
406
407         lwkt_gettoken(&vmspace_token);
408         for (cur = map->header.next; cur != &map->header; cur = cur->next) {
409                 switch(cur->maptype) {
410                 case VM_MAPTYPE_NORMAL:
411                 case VM_MAPTYPE_VPAGETABLE:
412                         if ((object = cur->object.vm_object) == NULL)
413                                 break;
414                         if (object->swblock_count) {
415                                 n = (cur->end - cur->start) / PAGE_SIZE;
416                                 count += object->swblock_count *
417                                     SWAP_META_PAGES * n / object->size + 1;
418                         }
419                         break;
420                 default:
421                         break;
422                 }
423         }
424         lwkt_reltoken(&vmspace_token);
425         return(count);
426 }
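
/*
 * Worked example of the estimate above (hypothetical numbers, and
 * assuming SWAP_META_PAGES is 16): for an object of size 1024 pages
 * with swblock_count == 4, a map entry covering n = 256 pages of the
 * object contributes
 *
 *	4 * 16 * 256 / 1024 + 1 = 17
 *
 * swap pages to the total.  The trailing "+ 1" is what makes any
 * object with swap use count for at least one page per entry.
 */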
427
428 /*
429  * Calculate the approximate number of anonymous pages in use by
430  * this vmspace.  To make up for fractional losses, we count each
431  * VM object as having at least 1 anonymous page.
432  *
433  * No requirements.
434  */
435 int
436 vmspace_anonymous_count(struct vmspace *vmspace)
437 {
438         vm_map_t map = &vmspace->vm_map;
439         vm_map_entry_t cur;
440         vm_object_t object;
441         int count = 0;
442
443         lwkt_gettoken(&vmspace_token);
444         for (cur = map->header.next; cur != &map->header; cur = cur->next) {
445                 switch(cur->maptype) {
446                 case VM_MAPTYPE_NORMAL:
447                 case VM_MAPTYPE_VPAGETABLE:
448                         if ((object = cur->object.vm_object) == NULL)
449                                 break;
450                         if (object->type != OBJT_DEFAULT &&
451                             object->type != OBJT_SWAP) {
452                                 break;
453                         }
454                         count += object->resident_page_count;
455                         break;
456                 default:
457                         break;
458                 }
459         }
460         lwkt_reltoken(&vmspace_token);
461         return(count);
462 }
463
464 /*
465  * Creates and returns a new empty VM map with the given physical map
466  * structure, and having the given lower and upper address bounds.
467  *
468  * No requirements.
469  */
470 vm_map_t
471 vm_map_create(vm_map_t result, pmap_t pmap, vm_offset_t min, vm_offset_t max)
472 {
473         if (result == NULL)
474                 result = zalloc(mapzone);
475         vm_map_init(result, min, max, pmap);
476         return (result);
477 }
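
/*
 * Illustrative sketch (hypothetical caller): passing NULL for the
 * result causes the map structure itself to come from mapzone,
 * otherwise the caller-supplied structure is initialized in place.
 *
 *	vm_map_t map;
 *
 *	map = vm_map_create(NULL, pmap, VM_MIN_USER_ADDRESS,
 *			    VM_MAX_USER_ADDRESS);
 */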
478
479 /*
480  * Initialize an existing vm_map structure such as that in the vmspace
481  * structure.  The pmap is initialized elsewhere.
482  *
483  * No requirements.
484  */
485 void
486 vm_map_init(struct vm_map *map, vm_offset_t min, vm_offset_t max, pmap_t pmap)
487 {
488         map->header.next = map->header.prev = &map->header;
489         RB_INIT(&map->rb_root);
490         map->nentries = 0;
491         map->size = 0;
492         map->system_map = 0;
493         map->min_offset = min;
494         map->max_offset = max;
495         map->pmap = pmap;
496         map->first_free = &map->header;
497         map->hint = &map->header;
498         map->timestamp = 0;
499         map->flags = 0;
500         lockinit(&map->lock, "thrd_sleep", 0, 0);
501 }
502
503 /*
504  * Shadow the vm_map_entry's object.  This typically needs to be done when
505  * a write fault is taken on an entry which had previously been cloned by
506  * fork().  The shared object (which might be NULL) must become private so
507  * we add a shadow layer above it.
508  *
509  * Object allocation for anonymous mappings is deferred as long as possible.
510  * When creating a shadow, however, the underlying object must be instantiated
511  * so it can be shared.
512  *
513  * If the map segment is governed by a virtual page table then it is
514  * possible to address offsets beyond the mapped area.  Just allocate
515  * a maximally sized object for this case.
516  *
517  * The vm_map must be exclusively locked.
518  * No other requirements.
519  */
520 static
521 void
522 vm_map_entry_shadow(vm_map_entry_t entry)
523 {
524         if (entry->maptype == VM_MAPTYPE_VPAGETABLE) {
525                 vm_object_shadow(&entry->object.vm_object, &entry->offset,
526                                  0x7FFFFFFF);   /* XXX */
527         } else {
528                 vm_object_shadow(&entry->object.vm_object, &entry->offset,
529                                  atop(entry->end - entry->start));
530         }
531         entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
532 }
533
534 /*
535  * Allocate an object for a vm_map_entry.
536  *
537  * Object allocation for anonymous mappings is deferred as long as possible.
538  * This function is called when we can defer no longer, generally when a map
539  * entry might be split or forked or takes a page fault.
540  *
541  * If the map segment is governed by a virtual page table then it is
542  * possible to address offsets beyond the mapped area.  Just allocate
543  * a maximally sized object for this case.
544  *
545  * The vm_map must be exclusively locked.
546  * No other requirements.
547  */
548 void 
549 vm_map_entry_allocate_object(vm_map_entry_t entry)
550 {
551         vm_object_t obj;
552
553         if (entry->maptype == VM_MAPTYPE_VPAGETABLE) {
554                 obj = vm_object_allocate(OBJT_DEFAULT, 0x7FFFFFFF); /* XXX */
555         } else {
556                 obj = vm_object_allocate(OBJT_DEFAULT,
557                                          atop(entry->end - entry->start));
558         }
559         entry->object.vm_object = obj;
560         entry->offset = 0;
561 }
562
563 /*
564  * Set an initial negative count so the first attempt to reserve
565  * space preloads a bunch of vm_map_entry's for this cpu.  Also
566  * pre-allocate 2 vm_map_entries which will be needed by zalloc() to
567  * map a new page for vm_map_entry structures.  SMP systems are
568  * particularly sensitive.
569  *
570  * This routine is called in early boot so we cannot just call
571  * vm_map_entry_reserve().
572  *
573  * Called from the low level boot code only (for each cpu)
574  */
575 void
576 vm_map_entry_reserve_cpu_init(globaldata_t gd)
577 {
578         vm_map_entry_t entry;
579         int i;
580
581         gd->gd_vme_avail -= MAP_RESERVE_COUNT * 2;
582         entry = &cpu_map_entry_init[gd->gd_cpuid][0];
583         for (i = 0; i < VMEPERCPU; ++i, ++entry) {
584                 entry->next = gd->gd_vme_base;
585                 gd->gd_vme_base = entry;
586         }
587 }
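
/*
 * Worked example of the negative preload (assuming MAP_RESERVE_COUNT
 * is 4): gd_vme_avail starts out at -8 above, so the first
 * vm_map_entry_reserve(4) call on this cpu loops until gd_vme_avail
 * reaches 4, pushing 12 freshly zalloc'd entries onto gd_vme_base
 * before subtracting the requested count.
 */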
588
589 /*
590  * Reserves vm_map_entry structures so code later on can manipulate
591  * map_entry structures within a locked map without blocking trying
592  * to allocate a new vm_map_entry.
593  *
594  * No requirements.
595  */
596 int
597 vm_map_entry_reserve(int count)
598 {
599         struct globaldata *gd = mycpu;
600         vm_map_entry_t entry;
601
602         /*
603          * Make sure we have enough structures in gd_vme_base to handle
604          * the reservation request.
605          */
606         crit_enter();
607         while (gd->gd_vme_avail < count) {
608                 entry = zalloc(mapentzone);
609                 entry->next = gd->gd_vme_base;
610                 gd->gd_vme_base = entry;
611                 ++gd->gd_vme_avail;
612         }
613         gd->gd_vme_avail -= count;
614         crit_exit();
615
616         return(count);
617 }
618
619 /*
620  * Releases previously reserved vm_map_entry structures that were not
621  * used.  If we have too much junk in our per-cpu cache clean some of
622  * it out.
623  *
624  * No requirements.
625  */
626 void
627 vm_map_entry_release(int count)
628 {
629         struct globaldata *gd = mycpu;
630         vm_map_entry_t entry;
631
632         crit_enter();
633         gd->gd_vme_avail += count;
634         while (gd->gd_vme_avail > MAP_RESERVE_SLOP) {
635                 entry = gd->gd_vme_base;
636                 KKASSERT(entry != NULL);
637                 gd->gd_vme_base = entry->next;
638                 --gd->gd_vme_avail;
639                 crit_exit();
640                 zfree(mapentzone, entry);
641                 crit_enter();
642         }
643         crit_exit();
644 }
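
/*
 * Illustrative sketch of the reservation protocol (the same pattern
 * used by vmspace_terminate() and vm_map_find() in this file):
 * reserve before locking the map so entry manipulation cannot block
 * in zalloc() while the map is locked, then release whatever was not
 * consumed.
 *
 *	int count;
 *
 *	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
 *	vm_map_lock(map);
 *	... clip, insert, or delete entries, passing &count ...
 *	vm_map_unlock(map);
 *	vm_map_entry_release(count);
 */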
645
646 /*
647  * Reserve map entry structures for use in kernel_map itself.  These
648  * entries have *ALREADY* been reserved on a per-cpu basis when the map
649  * was inited.  This function is used by zalloc() to avoid a recursion
650  * when zalloc() itself needs to allocate additional kernel memory.
651  *
652  * This function works like the normal reserve but does not load the
653  * vm_map_entry cache (because that would result in an infinite
654  * recursion).  Note that gd_vme_avail may go negative.  This is expected.
655  *
656  * Any caller of this function must be sure to renormalize after
657  * potentially eating entries to ensure that the reserve supply
658  * remains intact.
659  *
660  * No requirements.
661  */
662 int
663 vm_map_entry_kreserve(int count)
664 {
665         struct globaldata *gd = mycpu;
666
667         crit_enter();
668         gd->gd_vme_avail -= count;
669         crit_exit();
670         KASSERT(gd->gd_vme_base != NULL,
671                 ("no reserved entries left, gd_vme_avail = %d\n",
672                 gd->gd_vme_avail));
673         return(count);
674 }
675
676 /*
677  * Release previously reserved map entries for kernel_map.  We do not
678  * attempt to clean up like the normal release function as this would
679  * cause an unnecessary (but probably not fatal) deep procedure call.
680  *
681  * No requirements.
682  */
683 void
684 vm_map_entry_krelease(int count)
685 {
686         struct globaldata *gd = mycpu;
687
688         crit_enter();
689         gd->gd_vme_avail += count;
690         crit_exit();
691 }
692
693 /*
694  * Allocates a VM map entry for insertion.  No entry fields are filled in.
695  *
696  * The entries should have previously been reserved.  The reservation count
697  * is tracked in (*countp).
698  *
699  * No requirements.
700  */
701 static vm_map_entry_t
702 vm_map_entry_create(vm_map_t map, int *countp)
703 {
704         struct globaldata *gd = mycpu;
705         vm_map_entry_t entry;
706
707         KKASSERT(*countp > 0);
708         --*countp;
709         crit_enter();
710         entry = gd->gd_vme_base;
711         KASSERT(entry != NULL, ("gd_vme_base NULL! count %d", *countp));
712         gd->gd_vme_base = entry->next;
713         crit_exit();
714
715         return(entry);
716 }
717
718 /*
719  * Dispose of a vm_map_entry that is no longer being referenced.
720  *
721  * No requirements.
722  */
723 static void
724 vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry, int *countp)
725 {
726         struct globaldata *gd = mycpu;
727
728         KKASSERT(map->hint != entry);
729         KKASSERT(map->first_free != entry);
730
731         ++*countp;
732         crit_enter();
733         entry->next = gd->gd_vme_base;
734         gd->gd_vme_base = entry;
735         crit_exit();
736 }
737
738
739 /*
740  * Insert/remove entries from maps.
741  *
742  * The related map must be exclusively locked.
743  * No other requirements.
744  *
745  * NOTE! We currently acquire the vmspace_token only to avoid races
746  *       against the pageout daemon's calls to vmspace_*_count(), which
747  *       are unable to safely lock the vm_map without potentially
748  *       deadlocking.
749  */
750 static __inline void
751 vm_map_entry_link(vm_map_t map,
752                   vm_map_entry_t after_where,
753                   vm_map_entry_t entry)
754 {
755         ASSERT_VM_MAP_LOCKED(map);
756
757         lwkt_gettoken(&vmspace_token);
758         map->nentries++;
759         entry->prev = after_where;
760         entry->next = after_where->next;
761         entry->next->prev = entry;
762         after_where->next = entry;
763         if (vm_map_rb_tree_RB_INSERT(&map->rb_root, entry))
764                 panic("vm_map_entry_link: dup addr map %p ent %p", map, entry);
765         lwkt_reltoken(&vmspace_token);
766 }
767
768 static __inline void
769 vm_map_entry_unlink(vm_map_t map,
770                     vm_map_entry_t entry)
771 {
772         vm_map_entry_t prev;
773         vm_map_entry_t next;
774
775         ASSERT_VM_MAP_LOCKED(map);
776
777         if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
778                 panic("vm_map_entry_unlink: attempt to mess with "
779                       "locked entry! %p", entry);
780         }
781         lwkt_gettoken(&vmspace_token);
782         prev = entry->prev;
783         next = entry->next;
784         next->prev = prev;
785         prev->next = next;
786         vm_map_rb_tree_RB_REMOVE(&map->rb_root, entry);
787         map->nentries--;
788         lwkt_reltoken(&vmspace_token);
789 }
790
791 /*
792  * Finds the map entry containing (or immediately preceding) the specified
793  * address in the given map.  The entry is returned in (*entry).
794  *
795  * The boolean result indicates whether the address is actually contained
796  * in the map.
797  *
798  * The related map must be locked.
799  * No other requirements.
800  */
801 boolean_t
802 vm_map_lookup_entry(vm_map_t map, vm_offset_t address, vm_map_entry_t *entry)
803 {
804         vm_map_entry_t tmp;
805         vm_map_entry_t last;
806
807         ASSERT_VM_MAP_LOCKED(map);
808 #if 0
809         /*
810          * XXX TEMPORARILY DISABLED.  For some reason our attempt to revive
811          * the hint code with the red-black lookup meets with system crashes
812          * and lockups.  We do not yet know why.
813          *
814          * It is possible that the problem is related to the setting
815          * of the hint during map_entry deletion, in the code specified
816          * at the GGG comment later on in this file.
817          */
818         /*
819          * Quickly check the cached hint, there's a good chance of a match.
820          */
821         if (map->hint != &map->header) {
822                 tmp = map->hint;
823                 if (address >= tmp->start && address < tmp->end) {
824                         *entry = tmp;
825                         return(TRUE);
826                 }
827         }
828 #endif
829
830         /*
831          * Locate the record from the top of the tree.  'last' tracks the
832          * closest prior record and is returned if no match is found, which
833          * in binary tree terms means tracking the most recent right-branch
834          * taken.  If there is no prior record, &map->header is returned.
835          */
836         last = &map->header;
837         tmp = RB_ROOT(&map->rb_root);
838
839         while (tmp) {
840                 if (address >= tmp->start) {
841                         if (address < tmp->end) {
842                                 *entry = tmp;
843                                 map->hint = tmp;
844                                 return(TRUE);
845                         }
846                         last = tmp;
847                         tmp = RB_RIGHT(tmp, rb_entry);
848                 } else {
849                         tmp = RB_LEFT(tmp, rb_entry);
850                 }
851         }
852         *entry = last;
853         return (FALSE);
854 }
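
/*
 * Illustrative usage sketch: with the map locked, the boolean result
 * distinguishes a containing entry from a predecessor.
 *
 *	vm_map_entry_t entry;
 *
 *	if (vm_map_lookup_entry(map, addr, &entry)) {
 *		... addr lies within [entry->start, entry->end) ...
 *	} else {
 *		... entry is the closest preceding entry, or
 *		    &map->header if addr precedes every entry ...
 *	}
 */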
855
856 /*
857  * Inserts the given whole VM object into the target map at the specified
858  * address range.  The object's size should match that of the address range.
859  *
860  * The map must be exclusively locked.
861  * The caller must have reserved sufficient vm_map_entry structures.
862  *
863  * If object is non-NULL, ref count must be bumped by caller
864  * prior to making call to account for the new entry.
865  */
866 int
867 vm_map_insert(vm_map_t map, int *countp,
868               vm_object_t object, vm_ooffset_t offset,
869               vm_offset_t start, vm_offset_t end,
870               vm_maptype_t maptype,
871               vm_prot_t prot, vm_prot_t max,
872               int cow)
873 {
874         vm_map_entry_t new_entry;
875         vm_map_entry_t prev_entry;
876         vm_map_entry_t temp_entry;
877         vm_eflags_t protoeflags;
878
879         ASSERT_VM_MAP_LOCKED(map);
880
881         /*
882          * Check that the start and end points are not bogus.
883          */
884         if ((start < map->min_offset) || (end > map->max_offset) ||
885             (start >= end))
886                 return (KERN_INVALID_ADDRESS);
887
888         /*
889          * Find the entry prior to the proposed starting address; if it's part
890          * of an existing entry, this range is bogus.
891          */
892         if (vm_map_lookup_entry(map, start, &temp_entry))
893                 return (KERN_NO_SPACE);
894
895         prev_entry = temp_entry;
896
897         /*
898          * Assert that the next entry doesn't overlap the end point.
899          */
900
901         if ((prev_entry->next != &map->header) &&
902             (prev_entry->next->start < end))
903                 return (KERN_NO_SPACE);
904
905         protoeflags = 0;
906
907         if (cow & MAP_COPY_ON_WRITE)
908                 protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY;
909
910         if (cow & MAP_NOFAULT) {
911                 protoeflags |= MAP_ENTRY_NOFAULT;
912
913                 KASSERT(object == NULL,
914                         ("vm_map_insert: paradoxical MAP_NOFAULT request"));
915         }
916         if (cow & MAP_DISABLE_SYNCER)
917                 protoeflags |= MAP_ENTRY_NOSYNC;
918         if (cow & MAP_DISABLE_COREDUMP)
919                 protoeflags |= MAP_ENTRY_NOCOREDUMP;
920         if (cow & MAP_IS_STACK)
921                 protoeflags |= MAP_ENTRY_STACK;
922         if (cow & MAP_IS_KSTACK)
923                 protoeflags |= MAP_ENTRY_KSTACK;
924
925         lwkt_gettoken(&vm_token);
926         lwkt_gettoken(&vmobj_token);
927
928         if (object) {
929                 /*
930                  * When object is non-NULL, it could be shared with another
931                  * process.  We have to set or clear OBJ_ONEMAPPING 
932                  * appropriately.
933                  */
934
935                 if ((object->ref_count > 1) || (object->shadow_count != 0)) {
936                         vm_object_clear_flag(object, OBJ_ONEMAPPING);
937                 }
938         }
939         else if ((prev_entry != &map->header) &&
940                  (prev_entry->eflags == protoeflags) &&
941                  (prev_entry->end == start) &&
942                  (prev_entry->wired_count == 0) &&
943                  prev_entry->maptype == maptype &&
944                  ((prev_entry->object.vm_object == NULL) ||
945                   vm_object_coalesce(prev_entry->object.vm_object,
946                                      OFF_TO_IDX(prev_entry->offset),
947                                      (vm_size_t)(prev_entry->end - prev_entry->start),
948                                      (vm_size_t)(end - prev_entry->end)))) {
949                 /*
950                  * We were able to extend the object.  Determine if we
951                  * can extend the previous map entry to include the 
952                  * new range as well.
953                  */
954                 if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
955                     (prev_entry->protection == prot) &&
956                     (prev_entry->max_protection == max)) {
957                         lwkt_reltoken(&vmobj_token);
958                         lwkt_reltoken(&vm_token);
959                         map->size += (end - prev_entry->end);
960                         prev_entry->end = end;
961                         vm_map_simplify_entry(map, prev_entry, countp);
962                         return (KERN_SUCCESS);
963                 }
964
965                 /*
966                  * If we can extend the object but cannot extend the
967                  * map entry, we have to create a new map entry.  We
968                  * must bump the ref count on the extended object to
969                  * account for it.  object may be NULL.
970                  */
971                 object = prev_entry->object.vm_object;
972                 offset = prev_entry->offset +
973                         (prev_entry->end - prev_entry->start);
974                 vm_object_reference_locked(object);
975         }
976
977         lwkt_reltoken(&vmobj_token);
978         lwkt_reltoken(&vm_token);
979
980         /*
981          * NOTE: if conditionals fail, object can be NULL here.  This occurs
982          * in things like the buffer map where we manage kva but do not manage
983          * backing objects.
984          */
985
986         /*
987          * Create a new entry
988          */
989
990         new_entry = vm_map_entry_create(map, countp);
991         new_entry->start = start;
992         new_entry->end = end;
993
994         new_entry->maptype = maptype;
995         new_entry->eflags = protoeflags;
996         new_entry->object.vm_object = object;
997         new_entry->offset = offset;
998         new_entry->aux.master_pde = 0;
999
1000         new_entry->inheritance = VM_INHERIT_DEFAULT;
1001         new_entry->protection = prot;
1002         new_entry->max_protection = max;
1003         new_entry->wired_count = 0;
1004
1005         /*
1006          * Insert the new entry into the list
1007          */
1008
1009         vm_map_entry_link(map, prev_entry, new_entry);
1010         map->size += new_entry->end - new_entry->start;
1011
1012         /*
1013          * Update the free space hint.  Entries cannot overlap.
1014          * An exact comparison is needed to avoid matching
1015          * against the map->header.
1016          */
1017         if ((map->first_free == prev_entry) &&
1018             (prev_entry->end == new_entry->start)) {
1019                 map->first_free = new_entry;
1020         }
1021
1022 #if 0
1023         /*
1024          * Temporarily removed to avoid MAP_STACK panic, due to
1025          * MAP_STACK being a huge hack.  Will be added back in
1026          * when MAP_STACK (and the user stack mapping) is fixed.
1027          */
1028         /*
1029          * It may be possible to simplify the entry
1030          */
1031         vm_map_simplify_entry(map, new_entry, countp);
1032 #endif
1033
1034         /*
1035          * Try to pre-populate the page table.  Mappings governed by virtual
1036          * page tables cannot be prepopulated without a lot of work, so
1037          * don't try.
1038          */
1039         if ((cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) &&
1040             maptype != VM_MAPTYPE_VPAGETABLE) {
1041                 pmap_object_init_pt(map->pmap, start, prot,
1042                                     object, OFF_TO_IDX(offset), end - start,
1043                                     cow & MAP_PREFAULT_PARTIAL);
1044         }
1045
1046         return (KERN_SUCCESS);
1047 }
1048
1049 /*
1050  * Find sufficient space for `length' bytes in the given map, starting at
1051  * `start'.  Returns 0 on success, 1 on no space.
1052  *
1053  * This function will return an arbitrarily aligned pointer.  If no
1054  * particular alignment is required you should pass align as 1.  Note that
1055  * the map may return PAGE_SIZE aligned pointers if all the lengths used in
1056  * the map are a multiple of PAGE_SIZE, even if you pass a smaller align
1057  * argument.
1058  *
1059  * 'align' should be a power of 2 but is not required to be.
1060  *
1061  * The map must be exclusively locked.
1062  * No other requirements.
1063  */
1064 int
1065 vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length,
1066                  vm_size_t align, int flags, vm_offset_t *addr)
1067 {
1068         vm_map_entry_t entry, next;
1069         vm_offset_t end;
1070         vm_offset_t align_mask;
1071
1072         if (start < map->min_offset)
1073                 start = map->min_offset;
1074         if (start > map->max_offset)
1075                 return (1);
1076
1077         /*
1078          * If the alignment is not a power of 2 we will have to use
1079          * a mod/division, set align_mask to a special value.
1080          */
1081         if ((align | (align - 1)) + 1 != (align << 1))
1082                 align_mask = (vm_offset_t)-1;
1083         else
1084                 align_mask = align - 1;
1085
1086         /*
1087          * Look for the first possible address; if there's already something
1088          * at this address, we have to start after it.
1089          */
1090         if (start == map->min_offset) {
1091                 if ((entry = map->first_free) != &map->header)
1092                         start = entry->end;
1093         } else {
1094                 vm_map_entry_t tmp;
1095
1096                 if (vm_map_lookup_entry(map, start, &tmp))
1097                         start = tmp->end;
1098                 entry = tmp;
1099         }
1100
1101         /*
1102          * Look through the rest of the map, trying to fit a new region in the
1103          * gap between existing regions, or after the very last region.
1104          */
1105         for (;; start = (entry = next)->end) {
1106                 /*
1107                  * Adjust the proposed start by the requested alignment,
1108                  * be sure that we didn't wrap the address.
1109                  */
1110                 if (align_mask == (vm_offset_t)-1)
1111                         end = ((start + align - 1) / align) * align;
1112                 else
1113                         end = (start + align_mask) & ~align_mask;
1114                 if (end < start)
1115                         return (1);
1116                 start = end;
1117                 /*
1118                  * Find the end of the proposed new region.  Be sure we didn't
1119                  * go beyond the end of the map, or wrap around the address.
1120                  * Then check to see if this is the last entry or if the 
1121                  * proposed end fits in the gap between this and the next
1122                  * entry.
1123                  */
1124                 end = start + length;
1125                 if (end > map->max_offset || end < start)
1126                         return (1);
1127                 next = entry->next;
1128
1129                 /*
1130                  * If the next entry's start address is beyond the desired
1131                  * end address we may have found a good entry.
1132                  *
1133                  * If the next entry is a stack mapping we do not map into
1134                  * the stack's reserved space.
1135                  *
1136                  * XXX continue to allow mapping into the stack's reserved
1137                  * space if doing a MAP_STACK mapping inside a MAP_STACK
1138                  * mapping, for backwards compatibility.  But the caller
1139                  * really should use MAP_STACK | MAP_TRYFIXED if they
1140                  * want to do that.
1141                  */
1142                 if (next == &map->header)
1143                         break;
1144                 if (next->start >= end) {
1145                         if ((next->eflags & MAP_ENTRY_STACK) == 0)
1146                                 break;
1147                         if (flags & MAP_STACK)
1148                                 break;
1149                         if (next->start - next->aux.avail_ssize >= end)
1150                                 break;
1151                 }
1152         }
1153         map->hint = entry;
1154
1155         /*
1156          * Grow the kernel_map if necessary.  pmap_growkernel() will panic
1157          * if it fails.  The kernel_map is locked and nothing can steal
1158          * our address space if pmap_growkernel() blocks.
1159          *
1160          * NOTE: This may be unconditionally called for kldload areas on
1161          *       x86_64 because these do not bump kernel_vm_end (which would
1162          *       fill 128G worth of page tables!).  Therefore we must not
1163          *       retry.
1164          */
1165         if (map == &kernel_map) {
1166                 vm_offset_t kstop;
1167
1168                 kstop = round_page(start + length);
1169                 if (kstop > kernel_vm_end)
1170                         pmap_growkernel(start, kstop);
1171         }
1172         *addr = start;
1173         return (0);
1174 }
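
/*
 * Worked example of the power-of-2 test above: for align = 8,
 * (8 | 7) + 1 == 16 == (8 << 1), so the cheap mask form is used
 * (align_mask = 7, end = (start + 7) & ~7).  For align = 24,
 * (24 | 23) + 1 == 32 != 48, so align_mask is set to -1 and the
 * routine falls back to end = ((start + 23) / 24) * 24.
 */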
1175
1176 /*
1177  * vm_map_find finds an unallocated region in the target address map with
1178  * the given length.  The search is defined to be first-fit from the
1179  * specified address; the region found is returned in the same parameter.
1180  *
1181  * If object is non-NULL, ref count must be bumped by caller
1182  * prior to making call to account for the new entry.
1183  *
1184  * No requirements.  This function will lock the map temporarily.
1185  */
1186 int
1187 vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1188             vm_offset_t *addr,  vm_size_t length, vm_size_t align,
1189             boolean_t fitit,
1190             vm_maptype_t maptype,
1191             vm_prot_t prot, vm_prot_t max,
1192             int cow)
1193 {
1194         vm_offset_t start;
1195         int result;
1196         int count;
1197
1198         start = *addr;
1199
1200         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1201         vm_map_lock(map);
1202         if (fitit) {
1203                 if (vm_map_findspace(map, start, length, align, 0, addr)) {
1204                         vm_map_unlock(map);
1205                         vm_map_entry_release(count);
1206                         return (KERN_NO_SPACE);
1207                 }
1208                 start = *addr;
1209         }
1210         result = vm_map_insert(map, &count, object, offset,
1211                                start, start + length,
1212                                maptype,
1213                                prot, max,
1214                                cow);
1215         vm_map_unlock(map);
1216         vm_map_entry_release(count);
1217
1218         return (result);
1219 }
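
/*
 * Illustrative sketch (hypothetical caller): map 'size' bytes of an
 * object at any convenient address at or above vm_map_min(map).  The
 * object's reference count must be bumped beforehand to account for
 * the new entry, and should be dropped again on failure.
 *
 *	vm_offset_t addr = vm_map_min(map);
 *	int rv;
 *
 *	vm_object_reference(object);
 *	rv = vm_map_find(map, object, 0, &addr, size, PAGE_SIZE,
 *			 TRUE, VM_MAPTYPE_NORMAL,
 *			 VM_PROT_ALL, VM_PROT_ALL, 0);
 *	if (rv != KERN_SUCCESS)
 *		vm_object_deallocate(object);
 */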
1220
1221 /*
1222  * Simplify the given map entry by merging with either neighbor.  This
1223  * routine also has the ability to merge with both neighbors.
1224  *
1225  * This routine guarantees that the passed entry remains valid (though
1226  * possibly extended).  When merging, this routine may delete one or
1227  * both neighbors.  No action is taken on entries which have their
1228  * in-transition flag set.
1229  *
1230  * The map must be exclusively locked.
1231  */
1232 void
1233 vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry, int *countp)
1234 {
1235         vm_map_entry_t next, prev;
1236         vm_size_t prevsize, esize;
1237
1238         if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
1239                 ++mycpu->gd_cnt.v_intrans_coll;
1240                 return;
1241         }
1242
1243         if (entry->maptype == VM_MAPTYPE_SUBMAP)
1244                 return;
1245
1246         prev = entry->prev;
1247         if (prev != &map->header) {
1248                 prevsize = prev->end - prev->start;
1249                 if ( (prev->end == entry->start) &&
1250                      (prev->maptype == entry->maptype) &&
1251                      (prev->object.vm_object == entry->object.vm_object) &&
1252                      (!prev->object.vm_object ||
1253                         (prev->offset + prevsize == entry->offset)) &&
1254                      (prev->eflags == entry->eflags) &&
1255                      (prev->protection == entry->protection) &&
1256                      (prev->max_protection == entry->max_protection) &&
1257                      (prev->inheritance == entry->inheritance) &&
1258                      (prev->wired_count == entry->wired_count)) {
1259                         if (map->first_free == prev)
1260                                 map->first_free = entry;
1261                         if (map->hint == prev)
1262                                 map->hint = entry;
1263                         vm_map_entry_unlink(map, prev);
1264                         entry->start = prev->start;
1265                         entry->offset = prev->offset;
1266                         if (prev->object.vm_object)
1267                                 vm_object_deallocate(prev->object.vm_object);
1268                         vm_map_entry_dispose(map, prev, countp);
1269                 }
1270         }
1271
1272         next = entry->next;
1273         if (next != &map->header) {
1274                 esize = entry->end - entry->start;
1275                 if ((entry->end == next->start) &&
1276                     (next->maptype == entry->maptype) &&
1277                     (next->object.vm_object == entry->object.vm_object) &&
1278                      (!entry->object.vm_object ||
1279                         (entry->offset + esize == next->offset)) &&
1280                     (next->eflags == entry->eflags) &&
1281                     (next->protection == entry->protection) &&
1282                     (next->max_protection == entry->max_protection) &&
1283                     (next->inheritance == entry->inheritance) &&
1284                     (next->wired_count == entry->wired_count)) {
1285                         if (map->first_free == next)
1286                                 map->first_free = entry;
1287                         if (map->hint == next)
1288                                 map->hint = entry;
1289                         vm_map_entry_unlink(map, next);
1290                         entry->end = next->end;
1291                         if (next->object.vm_object)
1292                                 vm_object_deallocate(next->object.vm_object);
1293                         vm_map_entry_dispose(map, next, countp);
1294                 }
1295         }
1296 }
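
/*
 * Example of a merge performed above (hypothetical values): adjacent
 * entries covering [0x1000,0x2000) at object offset 0x0 and
 * [0x2000,0x3000) at object offset 0x1000, backed by the same object
 * and with identical eflags, protection, inheritance and wired_count,
 * collapse into a single entry covering [0x1000,0x3000) at offset
 * 0x0; the redundant entry's object reference is dropped and the
 * entry structure is returned to the per-cpu cache.
 */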
1297
1298 /*
1299  * Asserts that the given entry begins at or after the specified address.
1300  * If necessary, it splits the entry into two.
1301  */
1302 #define vm_map_clip_start(map, entry, startaddr, countp)                \
1303 {                                                                       \
1304         if (startaddr > entry->start)                                   \
1305                 _vm_map_clip_start(map, entry, startaddr, countp);      \
1306 }
1307
1308 /*
1309  * This routine is called only when it is known that the entry must be split.
1310  *
1311  * The map must be exclusively locked.
1312  */
1313 static void
1314 _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start,
1315                    int *countp)
1316 {
1317         vm_map_entry_t new_entry;
1318
1319         /*
1320          * Split off the front portion -- note that we must insert the new
1321          * entry BEFORE this one, so that this entry has the specified
1322          * starting address.
1323          */
1324
1325         vm_map_simplify_entry(map, entry, countp);
1326
1327         /*
1328          * If there is no object backing this entry, we might as well create
1329          * one now.  If we defer it, an object can get created after the map
1330          * is clipped, and individual objects will be created for the split-up
1331          * map.  This is a bit of a hack, but is also about the best place to
1332          * put this improvement.
1333          */
1334         if (entry->object.vm_object == NULL && !map->system_map) {
1335                 vm_map_entry_allocate_object(entry);
1336         }
1337
1338         new_entry = vm_map_entry_create(map, countp);
1339         *new_entry = *entry;
1340
1341         new_entry->end = start;
1342         entry->offset += (start - entry->start);
1343         entry->start = start;
1344
1345         vm_map_entry_link(map, entry->prev, new_entry);
1346
1347         switch(entry->maptype) {
1348         case VM_MAPTYPE_NORMAL:
1349         case VM_MAPTYPE_VPAGETABLE:
1350                 vm_object_reference(new_entry->object.vm_object);
1351                 break;
1352         default:
1353                 break;
1354         }
1355 }
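
/*
 * Example of the clip performed above (hypothetical values): clipping
 * an entry spanning [0x1000,0x4000) with offset 0x0 at start address
 * 0x2000 leaves the original entry covering [0x2000,0x4000) with its
 * offset advanced to 0x1000, and links a new front entry covering
 * [0x1000,0x2000) at offset 0x0 immediately before it.
 */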
1356
1357 /*
1358  * Asserts that the given entry ends at or before the specified address.
1359  * If necessary, it splits the entry into two.
1360  *
1361  * The map must be exclusively locked.
1362  */
1363 #define vm_map_clip_end(map, entry, endaddr, countp)            \
1364 {                                                               \
1365         if (endaddr < entry->end)                               \
1366                 _vm_map_clip_end(map, entry, endaddr, countp);  \
1367 }
1368
1369 /*
1370  * This routine is called only when it is known that the entry must be split.
1371  *
1372  * The map must be exclusively locked.
1373  */
1374 static void
1375 _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end,
1376                  int *countp)
1377 {
1378         vm_map_entry_t new_entry;
1379
1380         /*
1381          * If there is no object backing this entry, we might as well create
1382          * one now.  If we defer it, an object can get created after the map
1383          * is clipped, and individual objects will be created for the split-up
1384          * map.  This is a bit of a hack, but is also about the best place to
1385          * put this improvement.
1386          */
1387
1388         if (entry->object.vm_object == NULL && !map->system_map) {
1389                 vm_map_entry_allocate_object(entry);
1390         }
1391
1392         /*
1393          * Create a new entry and insert it AFTER the specified entry
1394          */
1395
1396         new_entry = vm_map_entry_create(map, countp);
1397         *new_entry = *entry;
1398
1399         new_entry->start = entry->end = end;
1400         new_entry->offset += (end - entry->start);
1401
1402         vm_map_entry_link(map, entry, new_entry);
1403
1404         switch(entry->maptype) {
1405         case VM_MAPTYPE_NORMAL:
1406         case VM_MAPTYPE_VPAGETABLE:
1407                 vm_object_reference(new_entry->object.vm_object);
1408                 break;
1409         default:
1410                 break;
1411         }
1412 }
1413
1414 /*
1415  * Asserts that the starting and ending region addresses fall within the
1416  * valid range for the map.
1417  */
1418 #define VM_MAP_RANGE_CHECK(map, start, end)     \
1419 {                                               \
1420         if (start < vm_map_min(map))            \
1421                 start = vm_map_min(map);        \
1422         if (end > vm_map_max(map))              \
1423                 end = vm_map_max(map);          \
1424         if (start > end)                        \
1425                 start = end;                    \
1426 }
1427
1428 /*
1429  * Used to block when an in-transition collision occurs.  The map
1430  * is unlocked for the sleep and relocked before the return.
1431  */
1432 void
1433 vm_map_transition_wait(vm_map_t map)
1434 {
1435         tsleep_interlock(map, 0);
1436         vm_map_unlock(map);
1437         tsleep(map, PINTERLOCKED, "vment", 0);
1438         vm_map_lock(map);
1439 }
1440
1441 /*
1442  * When we do blocking operations with the map lock held it is
1443  * possible that a clip might have occurred on our in-transit entry,
1444  * requiring an adjustment to the entry in our loop.  These macros
1445  * help the pageable and clip_range code deal with the case.  The
1446  * conditional costs virtually nothing if no clipping has occurred.
1447  */
1448
1449 #define CLIP_CHECK_BACK(entry, save_start)              \
1450     do {                                                \
1451             while (entry->start != save_start) {        \
1452                     entry = entry->prev;                \
1453                     KASSERT(entry != &map->header, ("bad entry clip")); \
1454             }                                           \
1455     } while(0)
1456
1457 #define CLIP_CHECK_FWD(entry, save_end)                 \
1458     do {                                                \
1459             while (entry->end != save_end) {            \
1460                     entry = entry->next;                \
1461                     KASSERT(entry != &map->header, ("bad entry clip")); \
1462             }                                           \
1463     } while(0)
1464
1465
1466 /*
1467  * Clip the specified range and return the base entry.  The
1468  * range may cover several entries starting at the returned base
1469  * and the first and last entry in the covering sequence will be
1470  * properly clipped to the requested start and end address.
1471  *
1472  * If no holes are allowed you should pass the MAP_CLIP_NO_HOLES
1473  * flag.
1474  *
1475  * The MAP_ENTRY_IN_TRANSITION flag will be set for the entries
1476  * covered by the requested range.
1477  *
1478  * The map must be exclusively locked on entry and will remain locked
1479  * on return. If no range exists or the range contains holes and you
1480  * specified that no holes were allowed, NULL will be returned.  This
1481  * routine may temporarily unlock the map in order to avoid a deadlock when
1482  * sleeping.
1483  */
1484 static
1485 vm_map_entry_t
1486 vm_map_clip_range(vm_map_t map, vm_offset_t start, vm_offset_t end, 
1487                   int *countp, int flags)
1488 {
1489         vm_map_entry_t start_entry;
1490         vm_map_entry_t entry;
1491
1492         /*
1493          * Locate the entry and effect initial clipping.  The in-transition
1494          * case does not occur very often so do not try to optimize it.
1495          */
1496 again:
1497         if (vm_map_lookup_entry(map, start, &start_entry) == FALSE)
1498                 return (NULL);
1499         entry = start_entry;
1500         if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
1501                 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
1502                 ++mycpu->gd_cnt.v_intrans_coll;
1503                 ++mycpu->gd_cnt.v_intrans_wait;
1504                 vm_map_transition_wait(map);
1505                 /*
1506                  * entry and/or start_entry may have been clipped while
1507                  * we slept, or may have gone away entirely.  We have
1508                  * to restart from the lookup.
1509                  */
1510                 goto again;
1511         }
1512
1513         /*
1514          * Since we hold an exclusive map lock we do not have to restart
1515          * after clipping, even though clipping may block in zalloc.
1516          */
1517         vm_map_clip_start(map, entry, start, countp);
1518         vm_map_clip_end(map, entry, end, countp);
1519         entry->eflags |= MAP_ENTRY_IN_TRANSITION;
1520
1521         /*
1522          * Scan entries covered by the range.  When working on the next
1523          * entry a restart need only re-loop on the current entry which
1524          * we have already locked, since 'next' may have changed.  Also,
1525          * even though entry is safe, it may have been clipped so we
1526          * have to iterate forwards through the clip after sleeping.
1527          */
1528         while (entry->next != &map->header && entry->next->start < end) {
1529                 vm_map_entry_t next = entry->next;
1530
1531                 if (flags & MAP_CLIP_NO_HOLES) {
1532                         if (next->start > entry->end) {
1533                                 vm_map_unclip_range(map, start_entry,
1534                                         start, entry->end, countp, flags);
1535                                 return(NULL);
1536                         }
1537                 }
1538
1539                 if (next->eflags & MAP_ENTRY_IN_TRANSITION) {
1540                         vm_offset_t save_end = entry->end;
1541                         next->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
1542                         ++mycpu->gd_cnt.v_intrans_coll;
1543                         ++mycpu->gd_cnt.v_intrans_wait;
1544                         vm_map_transition_wait(map);
1545
1546                         /*
1547                          * clips might have occurred while we blocked.
1548                          */
1549                         CLIP_CHECK_FWD(entry, save_end);
1550                         CLIP_CHECK_BACK(start_entry, start);
1551                         continue;
1552                 }
1553                 /*
1554                  * No restart necessary even though clip_end may block, we
1555                  * are holding the map lock.
1556                  */
1557                 vm_map_clip_end(map, next, end, countp);
1558                 next->eflags |= MAP_ENTRY_IN_TRANSITION;
1559                 entry = next;
1560         }
1561         if (flags & MAP_CLIP_NO_HOLES) {
1562                 if (entry->end != end) {
1563                         vm_map_unclip_range(map, start_entry,
1564                                 start, entry->end, countp, flags);
1565                         return(NULL);
1566                 }
1567         }
1568         return(start_entry);
1569 }
1570
1571 /*
1572  * Undo the effect of vm_map_clip_range().  You should pass the same
1573  * flags and the same range that you passed to vm_map_clip_range().
1574  * This code will clear the in-transition flag on the entries and
1575  * wake up anyone waiting.  This code will also simplify the sequence
1576  * and attempt to merge it with entries before and after the sequence.
1577  *
1578  * The map must be locked on entry and will remain locked on return.
1579  *
1580  * Note that you should also pass the start_entry returned by
1581  * vm_map_clip_range().  However, if you block between the two calls
1582  * with the map unlocked please be aware that the start_entry may
1583  * have been clipped and you may need to scan it backwards to find
1584  * the entry corresponding with the original start address.  You are
1585  * responsible for this, vm_map_unclip_range() expects the correct
1586  * start_entry to be passed to it and will KASSERT otherwise.
1587  */
1588 static
1589 void
1590 vm_map_unclip_range(vm_map_t map, vm_map_entry_t start_entry,
1591                     vm_offset_t start, vm_offset_t end,
1592                     int *countp, int flags)
1593 {
1594         vm_map_entry_t entry;
1595
1596         entry = start_entry;
1597
1598         KASSERT(entry->start == start, ("unclip_range: illegal base entry"));
1599         while (entry != &map->header && entry->start < end) {
1600                 KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION,
1601                         ("in-transition flag not set during unclip on: %p",
1602                         entry));
1603                 KASSERT(entry->end <= end,
1604                         ("unclip_range: tail wasn't clipped"));
1605                 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
1606                 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
1607                         entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
1608                         wakeup(map);
1609                 }
1610                 entry = entry->next;
1611         }
1612
1613         /*
1614          * Simplification does not block so there is no restart case.
1615          */
1616         entry = start_entry;
1617         while (entry != &map->header && entry->start < end) {
1618                 vm_map_simplify_entry(map, entry, countp);
1619                 entry = entry->next;
1620         }
1621 }
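/*
 * Illustrative sketch (editorial addition, not part of the original
 * source): the clip/unclip pairing as used by vm_map_unwire() and
 * vm_map_wire() later in this file.  The range is clipped and marked
 * in-transition, the potentially blocking work is done, and the same
 * range is then unclipped:
 *
 *	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
 *	vm_map_lock(map);
 *	start_entry = vm_map_clip_range(map, start, end, &count,
 *					MAP_CLIP_NO_HOLES);
 *	if (start_entry == NULL) {
 *		vm_map_unlock(map);
 *		vm_map_entry_release(count);
 *		return (KERN_INVALID_ADDRESS);
 *	}
 *	... operate on the clipped entries, possibly blocking ...
 *	vm_map_unclip_range(map, start_entry, start, end, &count,
 *			    MAP_CLIP_NO_HOLES);
 *	vm_map_unlock(map);
 *	vm_map_entry_release(count);
 */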
1622
1623 /*
1624  * Mark the given range as handled by a subordinate map.
1625  *
1626  * This range must have been created with vm_map_find(), and no other
1627  * operations may have been performed on this range prior to calling
1628  * vm_map_submap().
1629  *
1630  * Submappings cannot be removed.
1631  *
1632  * No requirements.
1633  */
1634 int
1635 vm_map_submap(vm_map_t map, vm_offset_t start, vm_offset_t end, vm_map_t submap)
1636 {
1637         vm_map_entry_t entry;
1638         int result = KERN_INVALID_ARGUMENT;
1639         int count;
1640
1641         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1642         vm_map_lock(map);
1643
1644         VM_MAP_RANGE_CHECK(map, start, end);
1645
1646         if (vm_map_lookup_entry(map, start, &entry)) {
1647                 vm_map_clip_start(map, entry, start, &count);
1648         } else {
1649                 entry = entry->next;
1650         }
1651
1652         vm_map_clip_end(map, entry, end, &count);
1653
1654         if ((entry->start == start) && (entry->end == end) &&
1655             ((entry->eflags & MAP_ENTRY_COW) == 0) &&
1656             (entry->object.vm_object == NULL)) {
1657                 entry->object.sub_map = submap;
1658                 entry->maptype = VM_MAPTYPE_SUBMAP;
1659                 result = KERN_SUCCESS;
1660         }
1661         vm_map_unlock(map);
1662         vm_map_entry_release(count);
1663
1664         return (result);
1665 }
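/*
 * Illustrative sketch (editorial addition; parent_map, start, end and
 * submap are hypothetical names): installing a submap over a range
 * previously reserved in a parent map.  Per the comment above, the range
 * must have been created with vm_map_find() and otherwise left untouched:
 *
 *	(range [start, end) reserved earlier in parent_map via vm_map_find())
 *	if (vm_map_submap(parent_map, start, end, submap) != KERN_SUCCESS)
 *		panic("could not install submap");
 */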
1666
1667 /*
1668  * Sets the protection of the specified address region in the target map. 
1669  * If "set_max" is specified, the maximum protection is to be set;
1670  * otherwise, only the current protection is affected.
1671  *
1672  * The protection is not applicable to submaps, but is applicable to normal
1673  * maps and maps governed by virtual page tables.  For example, when operating
1674  * on a virtual page table our protection basically controls how COW occurs
1675  * on the backing object, whereas the virtual page table itself is an
1676  * abstraction provided for userland.
1677  *
1678  * No requirements.
1679  */
1680 int
1681 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
1682                vm_prot_t new_prot, boolean_t set_max)
1683 {
1684         vm_map_entry_t current;
1685         vm_map_entry_t entry;
1686         int count;
1687
1688         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1689         vm_map_lock(map);
1690
1691         VM_MAP_RANGE_CHECK(map, start, end);
1692
1693         if (vm_map_lookup_entry(map, start, &entry)) {
1694                 vm_map_clip_start(map, entry, start, &count);
1695         } else {
1696                 entry = entry->next;
1697         }
1698
1699         /*
1700          * Make a first pass to check for protection violations.
1701          */
1702         current = entry;
1703         while ((current != &map->header) && (current->start < end)) {
1704                 if (current->maptype == VM_MAPTYPE_SUBMAP) {
1705                         vm_map_unlock(map);
1706                         vm_map_entry_release(count);
1707                         return (KERN_INVALID_ARGUMENT);
1708                 }
1709                 if ((new_prot & current->max_protection) != new_prot) {
1710                         vm_map_unlock(map);
1711                         vm_map_entry_release(count);
1712                         return (KERN_PROTECTION_FAILURE);
1713                 }
1714                 current = current->next;
1715         }
1716
1717         /*
1718          * Go back and fix up protections. [Note that clipping is not
1719          * necessary the second time.]
1720          */
1721         current = entry;
1722
1723         while ((current != &map->header) && (current->start < end)) {
1724                 vm_prot_t old_prot;
1725
1726                 vm_map_clip_end(map, current, end, &count);
1727
1728                 old_prot = current->protection;
1729                 if (set_max) {
1730                         current->protection =
1731                             (current->max_protection = new_prot) &
1732                             old_prot;
1733                 } else {
1734                         current->protection = new_prot;
1735                 }
1736
1737                 /*
1738                  * Update physical map if necessary. Worry about copy-on-write
1739                  * here -- CHECK THIS XXX
1740                  */
1741
1742                 if (current->protection != old_prot) {
1743 #define MASK(entry)     (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
1744                                                         VM_PROT_ALL)
1745
1746                         pmap_protect(map->pmap, current->start,
1747                             current->end,
1748                             current->protection & MASK(current));
1749 #undef  MASK
1750                 }
1751
1752                 vm_map_simplify_entry(map, current, &count);
1753
1754                 current = current->next;
1755         }
1756
1757         vm_map_unlock(map);
1758         vm_map_entry_release(count);
1759         return (KERN_SUCCESS);
1760 }
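/*
 * Illustrative sketch (editorial addition; argument handling and names
 * such as p, addr, size and prot are hypothetical): roughly how an
 * mprotect(2)-style caller drives vm_map_protect() on the current
 * process's map and translates the result into an errno:
 *
 *	switch (vm_map_protect(&p->p_vmspace->vm_map, addr, addr + size,
 *			       prot, FALSE)) {
 *	case KERN_SUCCESS:
 *		return (0);
 *	case KERN_PROTECTION_FAILURE:
 *		return (EACCES);
 *	default:
 *		return (EINVAL);
 *	}
 */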
1761
1762 /*
1763  * This routine traverses a process's map handling the madvise
1764  * system call.  Advisories are classified as either those affecting
1765  * the vm_map_entry structure, or those affecting the underlying
1766  * objects.
1767  *
1768  * The <value> argument is used for extended madvise calls.
1769  *
1770  * No requirements.
1771  */
1772 int
1773 vm_map_madvise(vm_map_t map, vm_offset_t start, vm_offset_t end,
1774                int behav, off_t value)
1775 {
1776         vm_map_entry_t current, entry;
1777         int modify_map = 0;
1778         int error = 0;
1779         int count;
1780
1781         /*
1782          * Some madvise calls directly modify the vm_map_entry, in which case
1783          * we need to use an exclusive lock on the map and we need to perform 
1784          * various clipping operations.  Otherwise we only need a read-lock
1785          * on the map.
1786          */
1787
1788         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1789
1790         switch(behav) {
1791         case MADV_NORMAL:
1792         case MADV_SEQUENTIAL:
1793         case MADV_RANDOM:
1794         case MADV_NOSYNC:
1795         case MADV_AUTOSYNC:
1796         case MADV_NOCORE:
1797         case MADV_CORE:
1798         case MADV_SETMAP:
1799         case MADV_INVAL:
1800                 modify_map = 1;
1801                 vm_map_lock(map);
1802                 break;
1803         case MADV_WILLNEED:
1804         case MADV_DONTNEED:
1805         case MADV_FREE:
1806                 vm_map_lock_read(map);
1807                 break;
1808         default:
1809                 vm_map_entry_release(count);
1810                 return (EINVAL);
1811         }
1812
1813         /*
1814          * Locate starting entry and clip if necessary.
1815          */
1816
1817         VM_MAP_RANGE_CHECK(map, start, end);
1818
1819         if (vm_map_lookup_entry(map, start, &entry)) {
1820                 if (modify_map)
1821                         vm_map_clip_start(map, entry, start, &count);
1822         } else {
1823                 entry = entry->next;
1824         }
1825
1826         if (modify_map) {
1827                 /*
1828                  * madvise behaviors that are implemented in the vm_map_entry.
1829                  *
1830                  * We clip the vm_map_entry so that behavioral changes are
1831                  * limited to the specified address range.
1832                  */
1833                 for (current = entry;
1834                      (current != &map->header) && (current->start < end);
1835                      current = current->next
1836                 ) {
1837                         if (current->maptype == VM_MAPTYPE_SUBMAP)
1838                                 continue;
1839
1840                         vm_map_clip_end(map, current, end, &count);
1841
1842                         switch (behav) {
1843                         case MADV_NORMAL:
1844                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
1845                                 break;
1846                         case MADV_SEQUENTIAL:
1847                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
1848                                 break;
1849                         case MADV_RANDOM:
1850                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
1851                                 break;
1852                         case MADV_NOSYNC:
1853                                 current->eflags |= MAP_ENTRY_NOSYNC;
1854                                 break;
1855                         case MADV_AUTOSYNC:
1856                                 current->eflags &= ~MAP_ENTRY_NOSYNC;
1857                                 break;
1858                         case MADV_NOCORE:
1859                                 current->eflags |= MAP_ENTRY_NOCOREDUMP;
1860                                 break;
1861                         case MADV_CORE:
1862                                 current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
1863                                 break;
1864                         case MADV_INVAL:
1865                                 /*
1866                                  * Invalidate the related pmap entries, used
1867                                  * to flush portions of the real kernel's
1868                                  * pmap when the caller has removed or
1869                                  * modified existing mappings in a virtual
1870                                  * page table.
1871                                  */
1872                                 pmap_remove(map->pmap,
1873                                             current->start, current->end);
1874                                 break;
1875                         case MADV_SETMAP:
1876                                 /*
1877                                  * Set the page directory page for a map
1878                                  * governed by a virtual page table.  Mark
1879                                  * the entry as being governed by a virtual
1880                                  * page table if it is not.
1881                                  *
1882                                  * XXX the page directory page is stored
1883                                  * in the avail_ssize field of the map_entry.
1884                                  *
1885                                  * XXX the map simplification code does not
1886                                  * compare this field so weird things may
1887                                  * happen if you do not apply this function
1888                                  * to the entire mapping governed by the
1889                                  * virtual page table.
1890                                  */
1891                                 if (current->maptype != VM_MAPTYPE_VPAGETABLE) {
1892                                         error = EINVAL;
1893                                         break;
1894                                 }
1895                                 current->aux.master_pde = value;
1896                                 pmap_remove(map->pmap,
1897                                             current->start, current->end);
1898                                 break;
1899                         default:
1900                                 error = EINVAL;
1901                                 break;
1902                         }
1903                         vm_map_simplify_entry(map, current, &count);
1904                 }
1905                 vm_map_unlock(map);
1906         } else {
1907                 vm_pindex_t pindex;
1908                 int count;
1909
1910                 /*
1911                  * madvise behaviors that are implemented in the underlying
1912                  * vm_object.
1913                  *
1914                  * Since we don't clip the vm_map_entry, we have to clip
1915                  * the vm_object pindex and count.
1916                  *
1917                  * NOTE!  We currently do not support these functions on
1918                  * virtual page tables.
1919                  */
1920                 for (current = entry;
1921                      (current != &map->header) && (current->start < end);
1922                      current = current->next
1923                 ) {
1924                         vm_offset_t useStart;
1925
1926                         if (current->maptype != VM_MAPTYPE_NORMAL)
1927                                 continue;
1928
1929                         pindex = OFF_TO_IDX(current->offset);
1930                         count = atop(current->end - current->start);
1931                         useStart = current->start;
1932
1933                         if (current->start < start) {
1934                                 pindex += atop(start - current->start);
1935                                 count -= atop(start - current->start);
1936                                 useStart = start;
1937                         }
1938                         if (current->end > end)
1939                                 count -= atop(current->end - end);
1940
1941                         if (count <= 0)
1942                                 continue;
1943
1944                         vm_object_madvise(current->object.vm_object,
1945                                           pindex, count, behav);
1946
1947                         /*
1948                          * Try to populate the page table.  Mappings governed
1949                          * by virtual page tables cannot be pre-populated
1950                          * without a lot of work so don't try.
1951                          */
1952                         if (behav == MADV_WILLNEED &&
1953                             current->maptype != VM_MAPTYPE_VPAGETABLE) {
1954                                 pmap_object_init_pt(
1955                                     map->pmap, 
1956                                     useStart,
1957                                     current->protection,
1958                                     current->object.vm_object,
1959                                     pindex, 
1960                                     (count << PAGE_SHIFT),
1961                                     MAP_PREFAULT_MADVISE
1962                                 );
1963                         }
1964                 }
1965                 vm_map_unlock_read(map);
1966         }
1967         vm_map_entry_release(count);
1968         return(error);
1969 }       
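/*
 * Illustrative sketch (editorial addition; names such as p, start, end,
 * behav and value are hypothetical): an madvise(2)-style caller simply
 * passes the behavior through; the extended <value> argument is only
 * meaningful for behaviors such as MADV_SETMAP:
 *
 *	error = vm_map_madvise(&p->p_vmspace->vm_map, start, end,
 *			       behav, value);
 */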
1970
1971
1972 /*
1973  * Sets the inheritance of the specified address range in the target map.
1974  * Inheritance affects how the map will be shared with child maps at the
1975  * time of vm_map_fork.
1976  */
1977 int
1978 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
1979                vm_inherit_t new_inheritance)
1980 {
1981         vm_map_entry_t entry;
1982         vm_map_entry_t temp_entry;
1983         int count;
1984
1985         switch (new_inheritance) {
1986         case VM_INHERIT_NONE:
1987         case VM_INHERIT_COPY:
1988         case VM_INHERIT_SHARE:
1989                 break;
1990         default:
1991                 return (KERN_INVALID_ARGUMENT);
1992         }
1993
1994         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1995         vm_map_lock(map);
1996
1997         VM_MAP_RANGE_CHECK(map, start, end);
1998
1999         if (vm_map_lookup_entry(map, start, &temp_entry)) {
2000                 entry = temp_entry;
2001                 vm_map_clip_start(map, entry, start, &count);
2002         } else
2003                 entry = temp_entry->next;
2004
2005         while ((entry != &map->header) && (entry->start < end)) {
2006                 vm_map_clip_end(map, entry, end, &count);
2007
2008                 entry->inheritance = new_inheritance;
2009
2010                 vm_map_simplify_entry(map, entry, &count);
2011
2012                 entry = entry->next;
2013         }
2014         vm_map_unlock(map);
2015         vm_map_entry_release(count);
2016         return (KERN_SUCCESS);
2017 }
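/*
 * Illustrative sketch (editorial addition; names such as p, addr and
 * size are hypothetical): a minherit(2)-style caller sets the
 * inheritance for a page-aligned range of the current process's map:
 *
 *	rv = vm_map_inherit(&p->p_vmspace->vm_map, addr, addr + size,
 *			    VM_INHERIT_SHARE);
 */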
2018
2019 /*
2020  * Implement the semantics of mlock
2021  */
2022 int
2023 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t real_end,
2024               boolean_t new_pageable)
2025 {
2026         vm_map_entry_t entry;
2027         vm_map_entry_t start_entry;
2028         vm_offset_t end;
2029         int rv = KERN_SUCCESS;
2030         int count;
2031
2032         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
2033         vm_map_lock(map);
2034         VM_MAP_RANGE_CHECK(map, start, real_end);
2035         end = real_end;
2036
2037         start_entry = vm_map_clip_range(map, start, end, &count,
2038                                         MAP_CLIP_NO_HOLES);
2039         if (start_entry == NULL) {
2040                 vm_map_unlock(map);
2041                 vm_map_entry_release(count);
2042                 return (KERN_INVALID_ADDRESS);
2043         }
2044
2045         if (new_pageable == 0) {
2046                 entry = start_entry;
2047                 while ((entry != &map->header) && (entry->start < end)) {
2048                         vm_offset_t save_start;
2049                         vm_offset_t save_end;
2050
2051                         /*
2052                          * Already user wired or hard wired (trivial cases)
2053                          */
2054                         if (entry->eflags & MAP_ENTRY_USER_WIRED) {
2055                                 entry = entry->next;
2056                                 continue;
2057                         }
2058                         if (entry->wired_count != 0) {
2059                                 entry->wired_count++;
2060                                 entry->eflags |= MAP_ENTRY_USER_WIRED;
2061                                 entry = entry->next;
2062                                 continue;
2063                         }
2064
2065                         /*
2066                          * A new wiring requires instantiation of appropriate
2067                          * management structures and the faulting in of the
2068                          * page.
2069                          */
2070                         if (entry->maptype != VM_MAPTYPE_SUBMAP) {
2071                                 int copyflag = entry->eflags &
2072                                                MAP_ENTRY_NEEDS_COPY;
2073                                 if (copyflag && ((entry->protection &
2074                                                   VM_PROT_WRITE) != 0)) {
2075                                         vm_map_entry_shadow(entry);
2076                                 } else if (entry->object.vm_object == NULL &&
2077                                            !map->system_map) {
2078                                         vm_map_entry_allocate_object(entry);
2079                                 }
2080                         }
2081                         entry->wired_count++;
2082                         entry->eflags |= MAP_ENTRY_USER_WIRED;
2083
2084                         /*
2085                          * Now fault in the area.  Note that vm_fault_wire()
2086                          * may release the map lock temporarily; it will be
2087                          * relocked on return.  The in-transition
2088                          * flag protects the entries. 
2089                          */
2090                         save_start = entry->start;
2091                         save_end = entry->end;
2092                         rv = vm_fault_wire(map, entry, TRUE);
2093                         if (rv) {
2094                                 CLIP_CHECK_BACK(entry, save_start);
2095                                 for (;;) {
2096                                         KASSERT(entry->wired_count == 1, ("bad wired_count on entry"));
2097                                         entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2098                                         entry->wired_count = 0;
2099                                         if (entry->end == save_end)
2100                                                 break;
2101                                         entry = entry->next;
2102                                         KASSERT(entry != &map->header, ("bad entry clip during backout"));
2103                                 }
2104                                 end = save_start;       /* unwire the rest */
2105                                 break;
2106                         }
2107                         /*
2108                          * note that even though the entry might have been
2109                          * clipped, the USER_WIRED flag we set prevents
2110                          * duplication so we do not have to do a 
2111                          * clip check.
2112                          */
2113                         entry = entry->next;
2114                 }
2115
2116                 /*
2117                  * If we failed fall through to the unwiring section to
2118                  * unwire what we had wired so far.  'end' has already
2119                  * been adjusted.
2120                  */
2121                 if (rv)
2122                         new_pageable = 1;
2123
2124                 /*
2125                  * start_entry might have been clipped if we unlocked the
2126                  * map and blocked.  No matter how clipped it has gotten
2127                  * there should be a fragment that is on our start boundary.
2128                  */
2129                 CLIP_CHECK_BACK(start_entry, start);
2130         }
2131
2132         /*
2133          * Deal with the unwiring case.
2134          */
2135         if (new_pageable) {
2136                 /*
2137                  * This is the unwiring case.  We must first ensure that the
2138                  * range to be unwired is really wired down.  We know there
2139                  * are no holes.
2140                  */
2141                 entry = start_entry;
2142                 while ((entry != &map->header) && (entry->start < end)) {
2143                         if ((entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
2144                                 rv = KERN_INVALID_ARGUMENT;
2145                                 goto done;
2146                         }
2147                         KASSERT(entry->wired_count != 0, ("wired count was 0 with USER_WIRED set! %p", entry));
2148                         entry = entry->next;
2149                 }
2150
2151                 /*
2152                  * Now decrement the wiring count for each region. If a region
2153                  * becomes completely unwired, unwire its physical pages and
2154                  * mappings.
2155                  */
2156                 /*
2157                  * Note that the loop variable is reset to start_entry
2158                  * below rather than continuing from wherever the
2159                  * verification loop above left off.  Without the reset
2160                  * the loop condition would fail immediately, this pass
2161                  * would never run, and the pages backing the wired
2162                  * entries would never be unwired, leaking wired pages.
2163                  * Resetting it guarantees that every entry in the
2164                  * clipped range has its wiring count decremented.
2165                  */
2166                 entry = start_entry;
2167                 while ((entry != &map->header) && (entry->start < end)) {
2168                         KASSERT(entry->eflags & MAP_ENTRY_USER_WIRED,
2169                                 ("expected USER_WIRED on entry %p", entry));
2170                         entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2171                         entry->wired_count--;
2172                         if (entry->wired_count == 0)
2173                                 vm_fault_unwire(map, entry);
2174                         entry = entry->next;
2175                 }
2176         }
2177 done:
2178         vm_map_unclip_range(map, start_entry, start, real_end, &count,
2179                 MAP_CLIP_NO_HOLES);
2180         map->timestamp++;
2181         vm_map_unlock(map);
2182         vm_map_entry_release(count);
2183         return (rv);
2184 }
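/*
 * Illustrative sketch (editorial addition; names such as p, addr and
 * size are hypothetical): mlock(2)/munlock(2)-style callers use this
 * routine for user wiring.  Passing new_pageable == FALSE wires the
 * range, TRUE unwires it:
 *
 *	error = vm_map_unwire(&p->p_vmspace->vm_map, addr,
 *			      addr + size, FALSE);	(wire, mlock case)
 *	error = vm_map_unwire(&p->p_vmspace->vm_map, addr,
 *			      addr + size, TRUE);	(unwire, munlock case)
 */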
2185
2186 /*
2187  * Sets the pageability of the specified address range in the target map.
2188  * Regions specified as not pageable require locked-down physical
2189  * memory and physical page maps.
2190  *
2191  * The map must not be locked, but a reference must remain to the map
2192  * throughout the call.
2193  *
2194  * This function may be called via the zalloc path and must properly
2195  * reserve map entries for kernel_map.
2196  *
2197  * No requirements.
2198  */
2199 int
2200 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t real_end, int kmflags)
2201 {
2202         vm_map_entry_t entry;
2203         vm_map_entry_t start_entry;
2204         vm_offset_t end;
2205         int rv = KERN_SUCCESS;
2206         int count;
2207
2208         if (kmflags & KM_KRESERVE)
2209                 count = vm_map_entry_kreserve(MAP_RESERVE_COUNT);
2210         else
2211                 count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
2212         vm_map_lock(map);
2213         VM_MAP_RANGE_CHECK(map, start, real_end);
2214         end = real_end;
2215
2216         start_entry = vm_map_clip_range(map, start, end, &count,
2217                                         MAP_CLIP_NO_HOLES);
2218         if (start_entry == NULL) {
2219                 vm_map_unlock(map);
2220                 rv = KERN_INVALID_ADDRESS;
2221                 goto failure;
2222         }
2223         if ((kmflags & KM_PAGEABLE) == 0) {
2224                 /*
2225                  * Wiring.  
2226                  *
2227                  * 1.  Holding the write lock, we create any shadow or zero-fill
2228                  * objects that need to be created. Then we clip each map
2229                  * entry to the region to be wired and increment its wiring
2230                  * count.  We create objects before clipping the map entries
2231                  * to avoid object proliferation.
2232                  *
2233                  * 2.  We downgrade to a read lock, and call vm_fault_wire to
2234                  * fault in the pages for any newly wired area (wired_count is
2235                  * 1).
2236                  *
2237                  * Downgrading to a read lock for vm_fault_wire avoids a 
2238                  * possible deadlock with another process that may have faulted
2239                  * on one of the pages to be wired (it would mark the page busy,
2240                  * blocking us, then in turn block on the map lock that we
2241                  * hold).  Because of problems in the recursive lock package,
2242                  * we cannot upgrade to a write lock in vm_map_lookup.  Thus,
2243                  * any actions that require the write lock must be done
2244                  * beforehand.  Because we keep the read lock on the map, the
2245                  * copy-on-write status of the entries we modify here cannot
2246                  * change.
2247                  */
2248                 entry = start_entry;
2249                 while ((entry != &map->header) && (entry->start < end)) {
2250                         /*
2251                          * Trivial case if the entry is already wired
2252                          */
2253                         if (entry->wired_count) {
2254                                 entry->wired_count++;
2255                                 entry = entry->next;
2256                                 continue;
2257                         }
2258
2259                         /*
2260                          * The entry is being newly wired, we have to setup
2261                          * appropriate management structures.  A shadow 
2262                          * object is required for a copy-on-write region,
2263                          * or a normal object for a zero-fill region.  We
2264                          * do not have to do this for entries that point to sub
2265                          * maps because we won't hold the lock on the sub map.
2266                          */
2267                         if (entry->maptype != VM_MAPTYPE_SUBMAP) {
2268                                 int copyflag = entry->eflags &
2269                                                MAP_ENTRY_NEEDS_COPY;
2270                                 if (copyflag && ((entry->protection &
2271                                                   VM_PROT_WRITE) != 0)) {
2272                                         vm_map_entry_shadow(entry);
2273                                 } else if (entry->object.vm_object == NULL &&
2274                                            !map->system_map) {
2275                                         vm_map_entry_allocate_object(entry);
2276                                 }
2277                         }
2278
2279                         entry->wired_count++;
2280                         entry = entry->next;
2281                 }
2282
2283                 /*
2284                  * Pass 2.
2285                  */
2286
2287                 /*
2288                  * HACK HACK HACK HACK
2289                  *
2290                  * vm_fault_wire() temporarily unlocks the map to avoid
2291                  * deadlocks.  The in-transition flag from vm_map_clip_range
2292                  * call should protect us from changes while the map is
2293                  * unlocked.
2294                  *
2295                  * NOTE: Previously this comment stated that clipping might
2296                  *       still occur while the entry is unlocked, but from
2297                  *       what I can tell it actually cannot.
2298                  *
2299                  *       It is unclear whether the CLIP_CHECK_*() calls
2300                  *       are still needed but we keep them in anyway.
2301                  *
2302                  * HACK HACK HACK HACK
2303                  */
2304
2305                 entry = start_entry;
2306                 while (entry != &map->header && entry->start < end) {
2307                         /*
2308                          * If vm_fault_wire fails for any page we need to undo
2309                          * what has been done.  We decrement the wiring count
2310                          * for those pages which have not yet been wired (now)
2311                          * and unwire those that have (later).
2312                          */
2313                         vm_offset_t save_start = entry->start;
2314                         vm_offset_t save_end = entry->end;
2315
2316                         if (entry->wired_count == 1)
2317                                 rv = vm_fault_wire(map, entry, FALSE);
2318                         if (rv) {
2319                                 CLIP_CHECK_BACK(entry, save_start);
2320                                 for (;;) {
2321                                         KASSERT(entry->wired_count == 1, ("wired_count changed unexpectedly"));
2322                                         entry->wired_count = 0;
2323                                         if (entry->end == save_end)
2324                                                 break;
2325                                         entry = entry->next;
2326                                         KASSERT(entry != &map->header, ("bad entry clip during backout"));
2327                                 }
2328                                 end = save_start;
2329                                 break;
2330                         }
2331                         CLIP_CHECK_FWD(entry, save_end);
2332                         entry = entry->next;
2333                 }
2334
2335                 /*
2336                  * If a failure occurred, undo everything by falling through
2337                  * to the unwiring code.  'end' has already been adjusted
2338                  * appropriately.
2339                  */
2340                 if (rv)
2341                         kmflags |= KM_PAGEABLE;
2342
2343                 /*
2344                  * start_entry is still IN_TRANSITION but may have been 
2345                  * clipped since vm_fault_wire() unlocks and relocks the
2346                  * map.  No matter how clipped it has gotten there should
2347                  * be a fragment that is on our start boundary.
2348                  */
2349                 CLIP_CHECK_BACK(start_entry, start);
2350         }
2351
2352         if (kmflags & KM_PAGEABLE) {
2353                 /*
2354                  * This is the unwiring case.  We must first ensure that the
2355                  * range to be unwired is really wired down.  We know there
2356                  * are no holes.
2357                  */
2358                 entry = start_entry;
2359                 while ((entry != &map->header) && (entry->start < end)) {
2360                         if (entry->wired_count == 0) {
2361                                 rv = KERN_INVALID_ARGUMENT;
2362                                 goto done;
2363                         }
2364                         entry = entry->next;
2365                 }
2366
2367                 /*
2368                  * Now decrement the wiring count for each region. If a region
2369                  * becomes completely unwired, unwire its physical pages and
2370                  * mappings.
2371                  */
2372                 entry = start_entry;
2373                 while ((entry != &map->header) && (entry->start < end)) {
2374                         entry->wired_count--;
2375                         if (entry->wired_count == 0)
2376                                 vm_fault_unwire(map, entry);
2377                         entry = entry->next;
2378                 }
2379         }
2380 done:
2381         vm_map_unclip_range(map, start_entry, start, real_end,
2382                             &count, MAP_CLIP_NO_HOLES);
2383         map->timestamp++;
2384         vm_map_unlock(map);
2385 failure:
2386         if (kmflags & KM_KRESERVE)
2387                 vm_map_entry_krelease(count);
2388         else
2389                 vm_map_entry_release(count);
2390         return (rv);
2391 }
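/*
 * Illustrative sketch (editorial addition; addr, size and kmflags are
 * hypothetical names): wiring a kernel address range.  KM_PAGEABLE
 * requests unwiring instead of wiring, and KM_KRESERVE selects the
 * kernel-reserved entry pool for reentrant allocation paths:
 *
 *	rv = vm_map_wire(&kernel_map, addr, addr + size, kmflags);
 *	...
 *	rv = vm_map_wire(&kernel_map, addr, addr + size,
 *			 kmflags | KM_PAGEABLE);	(undo the wiring)
 */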
2392
2393 /*
2394  * Mark a newly allocated address range as wired but do not fault in
2395  * the pages.  The caller is expected to load the pages into the object.
2396  *
2397  * The map must be locked on entry and will remain locked on return.
2398  * No other requirements.
2399  */
2400 void
2401 vm_map_set_wired_quick(vm_map_t map, vm_offset_t addr, vm_size_t size,
2402                        int *countp)
2403 {
2404         vm_map_entry_t scan;
2405         vm_map_entry_t entry;
2406
2407         entry = vm_map_clip_range(map, addr, addr + size,
2408                                   countp, MAP_CLIP_NO_HOLES);
2409         for (scan = entry;
2410              scan != &map->header && scan->start < addr + size;
2411              scan = scan->next) {
2412             KKASSERT(scan->wired_count == 0);
2413             scan->wired_count = 1;
2414         }
2415         vm_map_unclip_range(map, entry, addr, addr + size,
2416                             countp, MAP_CLIP_NO_HOLES);
2417 }
2418
2419 /*
2420  * Push any dirty cached pages in the address range to their pager.
2421  * If syncio is TRUE, dirty pages are written synchronously.
2422  * If invalidate is TRUE, any cached pages are freed as well.
2423  *
2424  * This routine is called by sys_msync()
2425  *
2426  * Returns an error if any part of the specified range is not mapped.
2427  *
2428  * No requirements.
2429  */
2430 int
2431 vm_map_clean(vm_map_t map, vm_offset_t start, vm_offset_t end,
2432              boolean_t syncio, boolean_t invalidate)
2433 {
2434         vm_map_entry_t current;
2435         vm_map_entry_t entry;
2436         vm_size_t size;
2437         vm_object_t object;
2438         vm_ooffset_t offset;
2439
2440         vm_map_lock_read(map);
2441         VM_MAP_RANGE_CHECK(map, start, end);
2442         if (!vm_map_lookup_entry(map, start, &entry)) {
2443                 vm_map_unlock_read(map);
2444                 return (KERN_INVALID_ADDRESS);
2445         }
2446         /*
2447          * Make a first pass to check for holes.
2448          */
2449         for (current = entry; current->start < end; current = current->next) {
2450                 if (current->maptype == VM_MAPTYPE_SUBMAP) {
2451                         vm_map_unlock_read(map);
2452                         return (KERN_INVALID_ARGUMENT);
2453                 }
2454                 if (end > current->end &&
2455                     (current->next == &map->header ||
2456                         current->end != current->next->start)) {
2457                         vm_map_unlock_read(map);
2458                         return (KERN_INVALID_ADDRESS);
2459                 }
2460         }
2461
2462         if (invalidate)
2463                 pmap_remove(vm_map_pmap(map), start, end);
2464
2465         /*
2466          * Make a second pass, cleaning/uncaching pages from the indicated
2467          * objects as we go.
2468          *
2469          * Hold vm_token to avoid blocking in vm_object_reference()
2470          */
2471         lwkt_gettoken(&vm_token);
2472         lwkt_gettoken(&vmobj_token);
2473
2474         for (current = entry; current->start < end; current = current->next) {
2475                 offset = current->offset + (start - current->start);
2476                 size = (end <= current->end ? end : current->end) - start;
2477                 if (current->maptype == VM_MAPTYPE_SUBMAP) {
2478                         vm_map_t smap;
2479                         vm_map_entry_t tentry;
2480                         vm_size_t tsize;
2481
2482                         smap = current->object.sub_map;
2483                         vm_map_lock_read(smap);
2484                         vm_map_lookup_entry(smap, offset, &tentry);
2485                         tsize = tentry->end - offset;
2486                         if (tsize < size)
2487                                 size = tsize;
2488                         object = tentry->object.vm_object;
2489                         offset = tentry->offset + (offset - tentry->start);
2490                         vm_map_unlock_read(smap);
2491                 } else {
2492                         object = current->object.vm_object;
2493                 }
2494                 /*
2495                  * Note that there is absolutely no sense in writing out
2496                  * anonymous objects, so we track down the vnode object
2497                  * to write out.
2498                  * We invalidate (remove) all pages from the address space
2499                  * anyway, for semantic correctness.
2500                  *
2501                  * note: certain anonymous maps, such as MAP_NOSYNC maps,
2502                  * may start out with a NULL object.
2503                  */
2504                 while (object && object->backing_object) {
2505                         offset += object->backing_object_offset;
2506                         object = object->backing_object;
2507                         if (object->size < OFF_TO_IDX( offset + size))
2508                                 size = IDX_TO_OFF(object->size) - offset;
2509                 }
2510                 if (object && (object->type == OBJT_VNODE) && 
2511                     (current->protection & VM_PROT_WRITE) &&
2512                     (object->flags & OBJ_NOMSYNC) == 0) {
2513                         /*
2514                          * Flush pages if writing is allowed, invalidate them
2515                          * if invalidation requested.  Pages undergoing I/O
2516                          * will be ignored by vm_object_page_remove().
2517                          *
2518                          * We cannot lock the vnode and then wait for paging
2519                          * to complete without deadlocking against vm_fault.
2520                          * Instead we simply call vm_object_page_remove() and
2521                          * allow it to block internally on a page-by-page 
2522                          * basis when it encounters pages undergoing async 
2523                          * I/O.
2524                          */
2525                         int flags;
2526
2527                         vm_object_reference_locked(object);
2528                         vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY);
2529                         flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
2530                         flags |= invalidate ? OBJPC_INVAL : 0;
2531
2532                         /*
2533                          * When operating on a virtual page table just
2534                          * flush the whole object.  XXX we probably ought
2535                          * to 
2536                          */
2537                         switch(current->maptype) {
2538                         case VM_MAPTYPE_NORMAL:
2539                                 vm_object_page_clean(object,
2540                                     OFF_TO_IDX(offset),
2541                                     OFF_TO_IDX(offset + size + PAGE_MASK),
2542                                     flags);
2543                                 break;
2544                         case VM_MAPTYPE_VPAGETABLE:
2545                                 vm_object_page_clean(object, 0, 0, flags);
2546                                 break;
2547                         }
2548                         vn_unlock(((struct vnode *)object->handle));
2549                         vm_object_deallocate_locked(object);
2550                 }
2551                 if (object && invalidate &&
2552                    ((object->type == OBJT_VNODE) ||
2553                     (object->type == OBJT_DEVICE))) {
2554                         int clean_only = 
2555                                 (object->type == OBJT_DEVICE) ? FALSE : TRUE;
2556                         vm_object_reference_locked(object);
2557                         switch(current->maptype) {
2558                         case VM_MAPTYPE_NORMAL:
2559                                 vm_object_page_remove(object,
2560                                     OFF_TO_IDX(offset),
2561                                     OFF_TO_IDX(offset + size + PAGE_MASK),
2562                                     clean_only);
2563                                 break;
2564                         case VM_MAPTYPE_VPAGETABLE:
2565                                 vm_object_page_remove(object, 0, 0, clean_only);
2566                                 break;
2567                         }
2568                         vm_object_deallocate_locked(object);
2569                 }
2570                 start += size;
2571         }
2572
2573         lwkt_reltoken(&vmobj_token);
2574         lwkt_reltoken(&vm_token);
2575         vm_map_unlock_read(map);
2576
2577         return (KERN_SUCCESS);
2578 }
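/*
 * Illustrative sketch (editorial addition; names such as p, addr, size
 * and flags are hypothetical): roughly how an msync(2)-style caller
 * maps its flags onto the syncio/invalidate arguments:
 *
 *	rv = vm_map_clean(&p->p_vmspace->vm_map, addr, addr + size,
 *			  (flags & MS_ASYNC) == 0,
 *			  (flags & MS_INVALIDATE) != 0);
 */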
2579
2580 /*
2581  * Make the region specified by this entry pageable.
2582  *
2583  * The vm_map must be exclusively locked.
2584  */
2585 static void 
2586 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
2587 {
2588         entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2589         entry->wired_count = 0;
2590         vm_fault_unwire(map, entry);
2591 }
2592
2593 /*
2594  * Deallocate the given entry from the target map.
2595  *
2596  * The vm_map must be exclusively locked.
2597  */
2598 static void
2599 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry, int *countp)
2600 {
2601         vm_map_entry_unlink(map, entry);
2602         map->size -= entry->end - entry->start;
2603
2604         switch(entry->maptype) {
2605         case VM_MAPTYPE_NORMAL:
2606         case VM_MAPTYPE_VPAGETABLE:
2607                 vm_object_deallocate(entry->object.vm_object);
2608                 break;
2609         default:
2610                 break;
2611         }
2612
2613         vm_map_entry_dispose(map, entry, countp);
2614 }
2615
2616 /*
2617  * Deallocates the given address range from the target map.
2618  *
2619  * The vm_map must be exclusively locked.
2620  */
2621 int
2622 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end, int *countp)
2623 {
2624         vm_object_t object;
2625         vm_map_entry_t entry;
2626         vm_map_entry_t first_entry;
2627
2628         ASSERT_VM_MAP_LOCKED(map);
2629 again:
2630         /*
2631          * Find the start of the region, and clip it.  Set entry to point
2632          * at the first record containing the requested address or, if no
2633          * such record exists, the next record with a greater address.  The
2634          * loop will run from this point until a record beyond the termination
2635          * address is encountered.
2636          *
2637          * map->hint must be adjusted to not point to anything we delete,
2638          * so set it to the entry prior to the one being deleted.
2639          *
2640          * GGG see other GGG comment.
2641          */
2642         if (vm_map_lookup_entry(map, start, &first_entry)) {
2643                 entry = first_entry;
2644                 vm_map_clip_start(map, entry, start, countp);
2645                 map->hint = entry->prev;        /* possible problem XXX */
2646         } else {
2647                 map->hint = first_entry;        /* possible problem XXX */
2648                 entry = first_entry->next;
2649         }
2650
2651         /*
2652          * If a hole opens up prior to the current first_free then
2653          * adjust first_free.  As with map->hint, map->first_free
2654          * cannot be left set to anything we might delete.
2655          */
2656         if (entry == &map->header) {
2657                 map->first_free = &map->header;
2658         } else if (map->first_free->start >= start) {
2659                 map->first_free = entry->prev;
2660         }
2661
2662         /*
2663          * Step through all entries in this region
2664          */
2665         while ((entry != &map->header) && (entry->start < end)) {
2666                 vm_map_entry_t next;
2667                 vm_offset_t s, e;
2668                 vm_pindex_t offidxstart, offidxend, count;
2669
2670                 /*
2671                  * If we hit an in-transition entry we have to sleep and
2672                  * retry.  It's easier (and not really slower) to just retry
2673                  * since this case occurs so rarely and the hint is already
2674                  * pointing at the right place.  We have to reset the
2675                  * start offset so as not to accidentally delete an entry
2676                  * another process just created in vacated space.
2677                  */
2678                 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
2679                         entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2680                         start = entry->start;
2681                         ++mycpu->gd_cnt.v_intrans_coll;
2682                         ++mycpu->gd_cnt.v_intrans_wait;
2683                         vm_map_transition_wait(map);
2684                         goto again;
2685                 }
2686                 vm_map_clip_end(map, entry, end, countp);
2687
2688                 s = entry->start;
2689                 e = entry->end;
2690                 next = entry->next;
2691
2692                 offidxstart = OFF_TO_IDX(entry->offset);
2693                 count = OFF_TO_IDX(e - s);
2694                 object = entry->object.vm_object;
2695
2696                 /*
2697                  * Unwire before removing addresses from the pmap; otherwise,
2698                  * unwiring will put the entries back in the pmap.
2699                  */
2700                 if (entry->wired_count != 0)
2701                         vm_map_entry_unwire(map, entry);
2702
2703                 offidxend = offidxstart + count;
2704
2705                 /*
2706                  * Hold vm_token when manipulating vm_objects,
2707                  *
2708                  * Hold vmobj_token when potentially adding or removing
2709                  * objects (collapse requires both).
2710                  */
2711                 lwkt_gettoken(&vm_token);
2712                 lwkt_gettoken(&vmobj_token);
2713
2714                 if (object == &kernel_object) {
2715                         vm_object_page_remove(object, offidxstart,
2716                                               offidxend, FALSE);
2717                 } else {
2718                         pmap_remove(map->pmap, s, e);
2719
2720                         if (object != NULL &&
2721                             object->ref_count != 1 &&
2722                             (object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) ==
2723                              OBJ_ONEMAPPING &&
2724                             (object->type == OBJT_DEFAULT ||
2725                              object->type == OBJT_SWAP)) {
2726                                 vm_object_collapse(object);
2727                                 vm_object_page_remove(object, offidxstart,
2728                                                       offidxend, FALSE);
2729                                 if (object->type == OBJT_SWAP) {
2730                                         swap_pager_freespace(object,
2731                                                              offidxstart,
2732                                                              count);
2733                                 }
2734                                 if (offidxend >= object->size &&
2735                                     offidxstart < object->size) {
2736                                         object->size = offidxstart;
2737                                 }
2738                         }
2739                 }
2740                 lwkt_reltoken(&vmobj_token);
2741                 lwkt_reltoken(&vm_token);
2742
2743                 /*
2744                  * Delete the entry (which may delete the object) only after
2745                  * removing all pmap entries pointing to its pages.
2746                  * (Otherwise, its page frames may be reallocated, and any
2747                  * modify bits will be set in the wrong object!)
2748                  */
2749                 vm_map_entry_delete(map, entry, countp);
2750                 entry = next;
2751         }
2752         return (KERN_SUCCESS);
2753 }
2754
2755 /*
2756  * Remove the given address range from the target map.
2757  * This is the exported form of vm_map_delete.
2758  *
2759  * No requirements.
2760  */
2761 int
2762 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
2763 {
2764         int result;
2765         int count;
2766
2767         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
2768         vm_map_lock(map);
2769         VM_MAP_RANGE_CHECK(map, start, end);
2770         result = vm_map_delete(map, start, end, &count);
2771         vm_map_unlock(map);
2772         vm_map_entry_release(count);
2773
2774         return (result);
2775 }
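/*
 * Usage sketch (illustrative only; the range below is hypothetical): a
 * typical kernel caller hands vm_map_remove() an unlocked map and a
 * page-aligned range, and the reservation, locking and range check are
 * handled internally:
 *
 *      if (vm_map_remove(&curproc->p_vmspace->vm_map,
 *                        trunc_page(addr),
 *                        round_page(addr + size)) != KERN_SUCCESS)
 *              kprintf("vm_map_remove failed\n");
 */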
2776
2777 /*
2778  * Assert that the target map allows the specified privilege on the
2779  * entire address region given.  The entire region must be allocated.
2780  *
2781  * The caller must specify whether the vm_map is already locked or not.
2782  */
2783 boolean_t
2784 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
2785                         vm_prot_t protection, boolean_t have_lock)
2786 {
2787         vm_map_entry_t entry;
2788         vm_map_entry_t tmp_entry;
2789         boolean_t result;
2790
2791         if (have_lock == FALSE)
2792                 vm_map_lock_read(map);
2793
2794         if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
2795                 if (have_lock == FALSE)
2796                         vm_map_unlock_read(map);
2797                 return (FALSE);
2798         }
2799         entry = tmp_entry;
2800
2801         result = TRUE;
2802         while (start < end) {
2803                 if (entry == &map->header) {
2804                         result = FALSE;
2805                         break;
2806                 }
2807                 /*
2808                  * No holes allowed!
2809                  */
2810
2811                 if (start < entry->start) {
2812                         result = FALSE;
2813                         break;
2814                 }
2815                 /*
2816                  * Check protection associated with entry.
2817                  */
2818
2819                 if ((entry->protection & protection) != protection) {
2820                         result = FALSE;
2821                         break;
2822                 }
2823                 /* go to next entry */
2824
2825                 start = entry->end;
2826                 entry = entry->next;
2827         }
2828         if (have_lock == FALSE)
2829                 vm_map_unlock_read(map);
2830         return (result);
2831 }
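/*
 * Usage sketch (illustrative only): a caller that does not already hold
 * the map lock can verify that a hypothetical user range is readable
 * before operating on it:
 *
 *      if (!vm_map_check_protection(map, start, end, VM_PROT_READ, FALSE))
 *              return (EFAULT);
 */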
2832
2833 /*
2834  * Split the pages in a map entry into a new object.  This affords
2835  * easier removal of unused pages, and keeps object inheritance from
2836  * being a negative impact on memory usage.
2837  * having a negative impact on memory usage.
2838  * The vm_map must be exclusively locked.
2839  * The orig_object should be held.
2840  */
2841 static void
2842 vm_map_split(vm_map_entry_t entry)
2843 {
2844         vm_page_t m;
2845         vm_object_t orig_object, new_object, source;
2846         vm_offset_t s, e;
2847         vm_pindex_t offidxstart, offidxend, idx;
2848         vm_size_t size;
2849         vm_ooffset_t offset;
2850
2851         orig_object = entry->object.vm_object;
2852         if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP)
2853                 return;
2854         if (orig_object->ref_count <= 1)
2855                 return;
2856
2857         offset = entry->offset;
2858         s = entry->start;
2859         e = entry->end;
2860
2861         offidxstart = OFF_TO_IDX(offset);
2862         offidxend = offidxstart + OFF_TO_IDX(e - s);
2863         size = offidxend - offidxstart;
2864
2865         switch(orig_object->type) {
2866         case OBJT_DEFAULT:
2867                 new_object = default_pager_alloc(NULL, IDX_TO_OFF(size),
2868                                                  VM_PROT_ALL, 0);
2869                 break;
2870         case OBJT_SWAP:
2871                 new_object = swap_pager_alloc(NULL, IDX_TO_OFF(size),
2872                                               VM_PROT_ALL, 0);
2873                 break;
2874         default:
2875                 /* not reached */
2876                 new_object = NULL;
2877                 KKASSERT(0);
2878         }
2879         if (new_object == NULL)
2880                 return;
2881
2882         /*
2883          * vm_token required when manipulating vm_objects.
2884          */
2885         lwkt_gettoken(&vm_token);
2886         lwkt_gettoken(&vmobj_token);
2887
2888         vm_object_hold(new_object);
2889
2890         source = orig_object->backing_object;
2891         if (source != NULL) {
2892                 vm_object_hold(source);
2893                 /* Referenced by new_object */
2894                 vm_object_reference_locked(source);
2895                 LIST_INSERT_HEAD(&source->shadow_head,
2896                                  new_object, shadow_list);
2897                 vm_object_clear_flag(source, OBJ_ONEMAPPING);
2898                 new_object->backing_object_offset = 
2899                         orig_object->backing_object_offset +
2900                         IDX_TO_OFF(offidxstart);
2901                 new_object->backing_object = source;
2902                 source->shadow_count++;
2903                 source->generation++;
2904                 vm_object_drop(source);
2905         }
2906
2907         for (idx = 0; idx < size; idx++) {
2908                 vm_page_t m;
2909
2910         retry:
2911                 m = vm_page_lookup(orig_object, offidxstart + idx);
2912                 if (m == NULL)
2913                         continue;
2914
2915                 /*
2916                  * We must wait for pending I/O to complete before we can
2917                  * rename the page.
2918                  *
2919                  * We do not have to VM_PROT_NONE the page as mappings should
2920                  * not be changed by this operation.
2921                  */
2922                 if (vm_page_sleep_busy(m, TRUE, "spltwt"))
2923                         goto retry;
2924                 vm_page_busy(m);
2925                 vm_page_rename(m, new_object, idx);
2926                 /* page automatically made dirty by rename and cache handled */
2927                 vm_page_busy(m);
2928         }
2929
2930         if (orig_object->type == OBJT_SWAP) {
2931                 vm_object_pip_add(orig_object, 1);
2932                 /*
2933                  * copy orig_object pages into new_object
2934                  * and destroy unneeded pages in
2935                  * shadow object.
2936                  */
2937                 swap_pager_copy(orig_object, new_object, offidxstart, 0);
2938                 vm_object_pip_wakeup(orig_object);
2939         }
2940
2941         /*
2942          * Wakeup the pages we played with.  No spl protection is needed
2943          * for a simple wakeup.
2944          */
2945         for (idx = 0; idx < size; idx++) {
2946                 m = vm_page_lookup(new_object, idx);
2947                 if (m)
2948                         vm_page_wakeup(m);
2949         }
2950
2951         entry->object.vm_object = new_object;
2952         entry->offset = 0LL;
2953         vm_object_deallocate_locked(orig_object);
2954         vm_object_drop(new_object);
2955         lwkt_reltoken(&vmobj_token);
2956         lwkt_reltoken(&vm_token);
2957 }
2958
2959 /*
2960  * Copies the contents of the source entry to the destination
2961  * entry.  The entries *must* be aligned properly.
2962  *
2963  * The vm_map must be exclusively locked.
2964  * vm_token must be held
2965  */
2966 static void
2967 vm_map_copy_entry(vm_map_t src_map, vm_map_t dst_map,
2968         vm_map_entry_t src_entry, vm_map_entry_t dst_entry)
2969 {
2970         vm_object_t src_object;
2971
2972         if (dst_entry->maptype == VM_MAPTYPE_SUBMAP)
2973                 return;
2974         if (src_entry->maptype == VM_MAPTYPE_SUBMAP)
2975                 return;
2976
2977         ASSERT_LWKT_TOKEN_HELD(&vm_token);
2978         lwkt_gettoken(&vmobj_token);            /* required for collapse */
2979
2980         if (src_entry->wired_count == 0) {
2981                 /*
2982                  * If the source entry is marked needs_copy, it is already
2983                  * write-protected.
2984                  */
2985                 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
2986                         pmap_protect(src_map->pmap,
2987                             src_entry->start,
2988                             src_entry->end,
2989                             src_entry->protection & ~VM_PROT_WRITE);
2990                 }
2991
2992                 /*
2993                  * Make a copy of the object.
2994                  */
2995                 if ((src_object = src_entry->object.vm_object) != NULL) {
2996                         if ((src_object->handle == NULL) &&
2997                                 (src_object->type == OBJT_DEFAULT ||
2998                                  src_object->type == OBJT_SWAP)) {
2999                                 vm_object_collapse(src_object);
3000                                 if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) {
3001                                         vm_map_split(src_entry);
3002                                         src_object = src_entry->object.vm_object;
3003                                 }
3004                         }
3005
3006                         vm_object_reference_locked(src_object);
3007                         vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
3008                         dst_entry->object.vm_object = src_object;
3009                         src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
3010                         dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
3011                         dst_entry->offset = src_entry->offset;
3012                 } else {
3013                         dst_entry->object.vm_object = NULL;
3014                         dst_entry->offset = 0;
3015                 }
3016
3017                 pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
3018                     dst_entry->end - dst_entry->start, src_entry->start);
3019         } else {
3020                 /*
3021                  * Of course, wired down pages can't be set copy-on-write.
3022                  * Cause wired pages to be copied into the new map by
3023                  * simulating faults (the new pages are pageable)
3024                  */
3025                 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
3026         }
3027         lwkt_reltoken(&vmobj_token);
3028 }
3029
3030 /*
3031  * vmspace_fork:
3032  * Create a new process vmspace structure and vm_map
3033  * based on those of an existing process.  The new map
3034  * is based on the old map, according to the inheritance
3035  * values on the regions in that map.
3036  *
3037  * The source map must not be locked.
3038  * No requirements.
3039  */
3040 struct vmspace *
3041 vmspace_fork(struct vmspace *vm1)
3042 {
3043         struct vmspace *vm2;
3044         vm_map_t old_map = &vm1->vm_map;
3045         vm_map_t new_map;
3046         vm_map_entry_t old_entry;
3047         vm_map_entry_t new_entry;
3048         vm_object_t object;
3049         int count;
3050
3051         lwkt_gettoken(&vm_token);
3052         lwkt_gettoken(&vmspace_token);
3053         lwkt_gettoken(&vmobj_token);
3054         vm_map_lock(old_map);
3055
3056         /*
3057          * XXX Note: upcalls are not copied.
3058          */
3059         vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset);
3060         bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
3061             (caddr_t)&vm1->vm_endcopy - (caddr_t)&vm1->vm_startcopy);
3062         new_map = &vm2->vm_map; /* XXX */
3063         new_map->timestamp = 1;
3064
3065         vm_map_lock(new_map);
3066
3067         count = 0;
3068         old_entry = old_map->header.next;
3069         while (old_entry != &old_map->header) {
3070                 ++count;
3071                 old_entry = old_entry->next;
3072         }
3073
3074         count = vm_map_entry_reserve(count + MAP_RESERVE_COUNT);
3075
3076         old_entry = old_map->header.next;
3077         while (old_entry != &old_map->header) {
3078                 if (old_entry->maptype == VM_MAPTYPE_SUBMAP)
3079                         panic("vm_map_fork: encountered a submap");
3080
3081                 switch (old_entry->inheritance) {
3082                 case VM_INHERIT_NONE:
3083                         break;
3084                 case VM_INHERIT_SHARE:
3085                         /*
3086                          * Clone the entry, creating the shared object if
3087                          * necessary.
3088                          */
3089                         object = old_entry->object.vm_object;
3090                         if (object == NULL) {
3091                                 vm_map_entry_allocate_object(old_entry);
3092                                 object = old_entry->object.vm_object;
3093                         }
3094
3095                         /*
3096                          * Add the reference before calling vm_map_entry_shadow
3097                          * to ensure that a shadow object is created.
3098                          */
3099                         vm_object_reference_locked(object);
3100                         if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
3101                                 vm_map_entry_shadow(old_entry);
3102                                 /* Transfer the second reference too. */
3103                                 vm_object_reference_locked(
3104                                     old_entry->object.vm_object);
3105                                 vm_object_deallocate_locked(object);
3106                                 object = old_entry->object.vm_object;
3107                         }
3108                         vm_object_clear_flag(object, OBJ_ONEMAPPING);
3109
3110                         /*
3111                          * Clone the entry, referencing the shared object.
3112                          */
3113                         new_entry = vm_map_entry_create(new_map, &count);
3114                         *new_entry = *old_entry;
3115                         new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
3116                         new_entry->wired_count = 0;
3117
3118                         /*
3119                          * Insert the entry into the new map -- we know we're
3120                          * inserting at the end of the new map.
3121                          */
3122
3123                         vm_map_entry_link(new_map, new_map->header.prev,
3124                                           new_entry);
3125
3126                         /*
3127                          * Update the physical map
3128                          */
3129                         pmap_copy(new_map->pmap, old_map->pmap,
3130                             new_entry->start,
3131                             (old_entry->end - old_entry->start),
3132                             old_entry->start);
3133                         break;
3134                 case VM_INHERIT_COPY:
3135                         /*
3136                          * Clone the entry and link into the map.
3137                          */
3138                         new_entry = vm_map_entry_create(new_map, &count);
3139                         *new_entry = *old_entry;
3140                         new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
3141                         new_entry->wired_count = 0;
3142                         new_entry->object.vm_object = NULL;
3143                         vm_map_entry_link(new_map, new_map->header.prev,
3144                                           new_entry);
3145                         vm_map_copy_entry(old_map, new_map, old_entry,
3146                                           new_entry);
3147                         break;
3148                 }
3149                 old_entry = old_entry->next;
3150         }
3151
3152         new_map->size = old_map->size;
3153         vm_map_unlock(old_map);
3154         vm_map_unlock(new_map);
3155         vm_map_entry_release(count);
3156
3157         lwkt_reltoken(&vmobj_token);
3158         lwkt_reltoken(&vmspace_token);
3159         lwkt_reltoken(&vm_token);
3160
3161         return (vm2);
3162 }
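/*
 * Usage sketch (illustrative only): a hypothetical caller on the fork
 * path clones the parent's vmspace and installs it in the child, with
 * each entry shared or copied according to its inheritance setting:
 *
 *      p2->p_vmspace = vmspace_fork(p1->p_vmspace);
 */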
3163
3164 /*
3165  * Create an auto-grow stack entry
3166  *
3167  * No requirements.
3168  */
3169 int
3170 vm_map_stack (vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
3171               int flags, vm_prot_t prot, vm_prot_t max, int cow)
3172 {
3173         vm_map_entry_t  prev_entry;
3174         vm_map_entry_t  new_stack_entry;
3175         vm_size_t       init_ssize;
3176         int             rv;
3177         int             count;
3178         vm_offset_t     tmpaddr;
3179
3180         cow |= MAP_IS_STACK;
3181
3182         if (max_ssize < sgrowsiz)
3183                 init_ssize = max_ssize;
3184         else
3185                 init_ssize = sgrowsiz;
3186
3187         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
3188         vm_map_lock(map);
3189
3190         /*
3191          * Find space for the mapping
3192          */
3193         if ((flags & (MAP_FIXED | MAP_TRYFIXED)) == 0) {
3194                 if (vm_map_findspace(map, addrbos, max_ssize, 1,
3195                                      flags, &tmpaddr)) {
3196                         vm_map_unlock(map);
3197                         vm_map_entry_release(count);
3198                         return (KERN_NO_SPACE);
3199                 }
3200                 addrbos = tmpaddr;
3201         }
3202
3203         /* If addr is already mapped, no go */
3204         if (vm_map_lookup_entry(map, addrbos, &prev_entry)) {
3205                 vm_map_unlock(map);
3206                 vm_map_entry_release(count);
3207                 return (KERN_NO_SPACE);
3208         }
3209
3210 #if 0
3211         /* XXX already handled by kern_mmap() */
3212         /* If we would blow our VMEM resource limit, no go */
3213         if (map->size + init_ssize >
3214             curproc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
3215                 vm_map_unlock(map);
3216                 vm_map_entry_release(count);
3217                 return (KERN_NO_SPACE);
3218         }
3219 #endif
3220
3221         /*
3222          * If we can't accommodate max_ssize in the current mapping,
3223          * no go.  However, we need to be aware that subsequent user
3224          * mappings might map into the space we have reserved for
3225          * the stack, and currently this space is not protected.
3226          *
3227          * Hopefully we will at least detect this condition
3228          * when we try to grow the stack.
3229          */
3230         if ((prev_entry->next != &map->header) &&
3231             (prev_entry->next->start < addrbos + max_ssize)) {
3232                 vm_map_unlock(map);
3233                 vm_map_entry_release(count);
3234                 return (KERN_NO_SPACE);
3235         }
3236
3237         /*
3238          * We initially map a stack of only init_ssize.  We will
3239          * grow as needed later.  Since this is to be a grow 
3240          * down stack, we map at the top of the range.
3241          *
3242          * Note: we would normally expect prot and max to be
3243          * VM_PROT_ALL, and cow to be 0.  Possibly we should
3244          * eliminate these as input parameters, and just
3245          * pass these values here in the insert call.
3246          */
3247         rv = vm_map_insert(map, &count,
3248                            NULL, 0, addrbos + max_ssize - init_ssize,
3249                            addrbos + max_ssize,
3250                            VM_MAPTYPE_NORMAL,
3251                            prot, max,
3252                            cow);
3253
3254         /* Now set the avail_ssize amount */
3255         if (rv == KERN_SUCCESS) {
3256                 if (prev_entry != &map->header)
3257                         vm_map_clip_end(map, prev_entry, addrbos + max_ssize - init_ssize, &count);
3258                 new_stack_entry = prev_entry->next;
3259                 if (new_stack_entry->end   != addrbos + max_ssize ||
3260                     new_stack_entry->start != addrbos + max_ssize - init_ssize)
3261                         panic ("Bad entry start/end for new stack entry");
3262                 else 
3263                         new_stack_entry->aux.avail_ssize = max_ssize - init_ssize;
3264         }
3265
3266         vm_map_unlock(map);
3267         vm_map_entry_release(count);
3268         return (rv);
3269 }
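/*
 * Usage sketch (illustrative only; addresses and limits are hypothetical):
 * a caller setting up a user stack reserves max_ssize of address space but
 * initially maps only the top init_ssize bytes:
 *
 *      rv = vm_map_stack(&vmspace->vm_map, stack_bottom, maxssiz, 0,
 *                        VM_PROT_ALL, VM_PROT_ALL, 0);
 */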
3270
3271 /*
3272  * Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if the
3273  * desired address is already mapped, or if we successfully grow
3274  * the stack.  Also returns KERN_SUCCESS if addr is outside the
3275  * stack range (this is strange, but preserves compatibility with
3276  * the grow function in vm_machdep.c).
3277  *
3278  * No requirements.
3279  */
3280 int
3281 vm_map_growstack (struct proc *p, vm_offset_t addr)
3282 {
3283         vm_map_entry_t prev_entry;
3284         vm_map_entry_t stack_entry;
3285         vm_map_entry_t new_stack_entry;
3286         struct vmspace *vm = p->p_vmspace;
3287         vm_map_t map = &vm->vm_map;
3288         vm_offset_t    end;
3289         int grow_amount;
3290         int rv = KERN_SUCCESS;
3291         int is_procstack;
3292         int use_read_lock = 1;
3293         int count;
3294
3295         count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
3296 Retry:
3297         if (use_read_lock)
3298                 vm_map_lock_read(map);
3299         else
3300                 vm_map_lock(map);
3301
3302         /* If addr is already in the entry range, no need to grow. */
3303         if (vm_map_lookup_entry(map, addr, &prev_entry))
3304                 goto done;
3305
3306         if ((stack_entry = prev_entry->next) == &map->header)
3307                 goto done;
3308         if (prev_entry == &map->header) 
3309                 end = stack_entry->start - stack_entry->aux.avail_ssize;
3310         else
3311                 end = prev_entry->end;
3312
3313         /*
3314          * This next test mimics the old grow function in vm_machdep.c.
3315          * It really doesn't quite make sense, but we do it anyway
3316          * for compatibility.
3317          *
3318          * If the stack is not growable, return success.  This signals
3319          * the caller to proceed as it normally would with normal vm.
3320          */
3321         if (stack_entry->aux.avail_ssize < 1 ||
3322             addr >= stack_entry->start ||
3323             addr <  stack_entry->start - stack_entry->aux.avail_ssize) {
3324                 goto done;
3325         } 
3326         
3327         /* Find the minimum grow amount */
3328         grow_amount = roundup (stack_entry->start - addr, PAGE_SIZE);
3329         if (grow_amount > stack_entry->aux.avail_ssize) {
3330                 rv = KERN_NO_SPACE;
3331                 goto done;
3332         }
3333
3334         /*
3335          * If there is no longer enough space between the entries,
3336          * fail (no go) and adjust the available space.  Note: this
3337          * should only happen if the user has mapped into the
3338          * stack area after the stack was created, and is
3339          * probably an error.
3340          *
3341          * This also effectively destroys any guard page the user
3342          * might have intended by limiting the stack size.
3343          */
3344         if (grow_amount > stack_entry->start - end) {
3345                 if (use_read_lock && vm_map_lock_upgrade(map)) {
3346                         use_read_lock = 0;
3347                         goto Retry;
3348                 }
3349                 use_read_lock = 0;
3350                 stack_entry->aux.avail_ssize = stack_entry->start - end;
3351                 rv = KERN_NO_SPACE;
3352                 goto done;
3353         }
3354
3355         is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr;
3356
3357         /* If this is the main process stack, see if we're over the 
3358          * stack limit.
3359          */
3360         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
3361                              p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
3362                 rv = KERN_NO_SPACE;
3363                 goto done;
3364         }
3365
3366         /* Round up the grow amount modulo SGROWSIZ */
3367         grow_amount = roundup (grow_amount, sgrowsiz);
3368         if (grow_amount > stack_entry->aux.avail_ssize) {
3369                 grow_amount = stack_entry->aux.avail_ssize;
3370         }
3371         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
3372                              p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
3373                 grow_amount = p->p_rlimit[RLIMIT_STACK].rlim_cur -
3374                               ctob(vm->vm_ssize);
3375         }
3376
3377         /* If we would blow our VMEM resource limit, no go */
3378         if (map->size + grow_amount > p->p_rlimit[RLIMIT_VMEM].rlim_cur) {
3379                 rv = KERN_NO_SPACE;
3380                 goto done;
3381         }
3382
3383         if (use_read_lock && vm_map_lock_upgrade(map)) {
3384                 use_read_lock = 0;
3385                 goto Retry;
3386         }
3387         use_read_lock = 0;
3388
3389         /* Get the preliminary new entry start value */
3390         addr = stack_entry->start - grow_amount;
3391
3392         /* If this puts us into the previous entry, cut back our growth
3393          * to the available space.  Also, see the note above.
3394          */
3395         if (addr < end) {
3396                 stack_entry->aux.avail_ssize = stack_entry->start - end;
3397                 addr = end;
3398         }
3399
3400         rv = vm_map_insert(map, &count,
3401                            NULL, 0, addr, stack_entry->start,
3402                            VM_MAPTYPE_NORMAL,
3403                            VM_PROT_ALL, VM_PROT_ALL,
3404                            0);
3405
3406         /* Adjust the available stack space by the amount we grew. */
3407         if (rv == KERN_SUCCESS) {
3408                 if (prev_entry != &map->header)
3409                         vm_map_clip_end(map, prev_entry, addr, &count);
3410                 new_stack_entry = prev_entry->next;
3411                 if (new_stack_entry->end   != stack_entry->start  ||
3412                     new_stack_entry->start != addr)
3413                         panic ("Bad stack grow start/end in new stack entry");
3414                 else {
3415                         new_stack_entry->aux.avail_ssize =
3416                                 stack_entry->aux.avail_ssize -
3417                                 (new_stack_entry->end - new_stack_entry->start);
3418                         if (is_procstack)
3419                                 vm->vm_ssize += btoc(new_stack_entry->end -
3420                                                      new_stack_entry->start);
3421                 }
3422
3423                 if (map->flags & MAP_WIREFUTURE)
3424                         vm_map_unwire(map, new_stack_entry->start,
3425                                       new_stack_entry->end, FALSE);
3426         }
3427
3428 done:
3429         if (use_read_lock)
3430                 vm_map_unlock_read(map);
3431         else
3432                 vm_map_unlock(map);
3433         vm_map_entry_release(count);
3434         return (rv);
3435 }
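/*
 * Usage sketch (illustrative only): the fault path typically gives the
 * stack a chance to grow before failing a user-mode fault below it:
 *
 *      if (map != &kernel_map &&
 *          vm_map_growstack(curproc, va) != KERN_SUCCESS)
 *              return (KERN_FAILURE);
 */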
3436
3437 /*
3438  * Unshare the specified VM space for exec.  If other processes are
3439  * mapped to it, then create a new one.  The new vmspace is null.
3440  *
3441  * No requirements.
3442  */
3443 void
3444 vmspace_exec(struct proc *p, struct vmspace *vmcopy) 
3445 {
3446         struct vmspace *oldvmspace = p->p_vmspace;
3447         struct vmspace *newvmspace;
3448         vm_map_t map = &p->p_vmspace->vm_map;
3449
3450         /*
3451          * If we are execing a resident vmspace we fork it, otherwise
3452          * we create a new vmspace.  Note that exitingcnt and upcalls
3453          * are not copied to the new vmspace.
3454          */
3455         lwkt_gettoken(&vmspace_token);
3456         if (vmcopy)  {
3457                 newvmspace = vmspace_fork(vmcopy);
3458         } else {
3459                 newvmspace = vmspace_alloc(map->min_offset, map->max_offset);
3460                 bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy,
3461                       (caddr_t)&oldvmspace->vm_endcopy -
3462                        (caddr_t)&oldvmspace->vm_startcopy);
3463         }
3464
3465         /*
3466          * Finish initializing the vmspace before assigning it
3467          * to the process.  The vmspace will become the current vmspace
3468          * if p == curproc.
3469          */
3470         pmap_pinit2(vmspace_pmap(newvmspace));
3471         pmap_replacevm(p, newvmspace, 0);
3472         sysref_put(&oldvmspace->vm_sysref);
3473         lwkt_reltoken(&vmspace_token);
3474 }
3475
3476 /*
3477  * Unshare the specified VM space for forcing COW.  This
3478  * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
3479  *
3480  * The exitingcnt test is not strictly necessary but has been
3481  * included for code sanity (to make the code a bit more deterministic).
3482  */
3483 void
3484 vmspace_unshare(struct proc *p) 
3485 {
3486         struct vmspace *oldvmspace = p->p_vmspace;
3487         struct vmspace *newvmspace;
3488
3489         lwkt_gettoken(&vmspace_token);
3490         if (oldvmspace->vm_sysref.refcnt != 1 || oldvmspace->vm_exitingcnt) {
3491                 newvmspace = vmspace_fork(oldvmspace);
3492                 pmap_pinit2(vmspace_pmap(newvmspace));
3493                 pmap_replacevm(p, newvmspace, 0);
3494                 sysref_put(&oldvmspace->vm_sysref);
3495         }
3496         lwkt_reltoken(&vmspace_token);
3497 }
3498
3499 /*
3500  * vm_map_hint: return the beginning of the best area suitable for
3501  * creating a new mapping with "prot" protection.
3502  *
3503  * No requirements.
3504  */
3505 vm_offset_t
3506 vm_map_hint(struct proc *p, vm_offset_t addr, vm_prot_t prot)
3507 {
3508         struct vmspace *vms = p->p_vmspace;
3509
3510         if (!randomize_mmap) {
3511                 /*
3512                  * Set a reasonable start point for the hint if it was
3513                  * not specified or if it falls within the heap space.
3514                  * Hinted mmap()s do not allocate out of the heap space.
3515                  */
3516                 if (addr == 0 ||
3517                     (addr >= round_page((vm_offset_t)vms->vm_taddr) &&
3518                      addr < round_page((vm_offset_t)vms->vm_daddr + maxdsiz))) {
3519                         addr = round_page((vm_offset_t)vms->vm_daddr + maxdsiz);
3520                 }
3521
3522                 return addr;
3523         }
3524
3525         if (addr != 0 && addr >= (vm_offset_t)vms->vm_daddr)
3526                 return addr;
3527
3528 #ifdef notyet
3529 #ifdef __i386__
3530         /*
3531          * If executable skip first two pages, otherwise start
3532          * after data + heap region.
3533          */
3534         if ((prot & VM_PROT_EXECUTE) &&
3535             ((vm_offset_t)vms->vm_daddr >= I386_MAX_EXE_ADDR)) {
3536                 addr = (PAGE_SIZE * 2) +
3537                     (karc4random() & (I386_MAX_EXE_ADDR / 2 - 1));
3538                 return (round_page(addr));
3539         }
3540 #endif /* __i386__ */
3541 #endif /* notyet */
3542
3543         addr = (vm_offset_t)vms->vm_daddr + MAXDSIZ;
3544         addr += karc4random() & (MIN((256 * 1024 * 1024), MAXDSIZ) - 1);
3545
3546         return (round_page(addr));
3547 }
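/*
 * Worked example (hypothetical values): with randomize_mmap enabled,
 * vm_daddr = 0x10000000 and MAXDSIZ = 128MB, the base hint becomes
 * 0x18000000 and a random offset of up to MIN(256MB, MAXDSIZ) - 1 is
 * added, so the page-rounded result falls roughly within
 * [0x18000000, 0x20000000).
 */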
3548
3549 /*
3550  * Finds the VM object, offset, and protection for a given virtual address
3551  * in the specified map, assuming a page fault of the type specified.
3552  *
3553  * Leaves the map in question locked for read; return values are guaranteed
3554  * until a vm_map_lookup_done call is performed.  Note that the map argument
3555  * is in/out; the returned map must be used in the call to vm_map_lookup_done.
3556  *
3557  * A handle (out_entry) is returned for use in vm_map_lookup_done, to make
3558  * that fast.
3559  *
3560  * If a lookup is requested with "write protection" specified, the map may
3561  * be changed to perform virtual copying operations, although the data
3562  * referenced will remain the same.
3563  *
3564  * No requirements.
3565  */
3566 int
3567 vm_map_lookup(vm_map_t *var_map,                /* IN/OUT */
3568               vm_offset_t vaddr,
3569               vm_prot_t fault_typea,
3570               vm_map_entry_t *out_entry,        /* OUT */
3571               vm_object_t *object,              /* OUT */
3572               vm_pindex_t *pindex,              /* OUT */
3573               vm_prot_t *out_prot,              /* OUT */
3574               boolean_t *wired)                 /* OUT */
3575 {
3576         vm_map_entry_t entry;
3577         vm_map_t map = *var_map;
3578         vm_prot_t prot;
3579         vm_prot_t fault_type = fault_typea;
3580         int use_read_lock = 1;
3581         int rv = KERN_SUCCESS;
3582
3583 RetryLookup:
3584         if (use_read_lock)
3585                 vm_map_lock_read(map);
3586         else
3587                 vm_map_lock(map);
3588
3589         /*
3590          * If the map has an interesting hint, try it before calling the
3591          * full-blown lookup routine.
3592          */
3593         entry = map->hint;
3594         *out_entry = entry;
3595
3596         if ((entry == &map->header) ||
3597             (vaddr < entry->start) || (vaddr >= entry->end)) {
3598                 vm_map_entry_t tmp_entry;
3599
3600                 /*
3601                  * Entry was either not a valid hint, or the vaddr was not
3602                  * contained in the entry, so do a full lookup.
3603                  */
3604                 if (!vm_map_lookup_entry(map, vaddr, &tmp_entry)) {
3605                         rv = KERN_INVALID_ADDRESS;
3606                         goto done;
3607                 }
3608
3609                 entry = tmp_entry;
3610                 *out_entry = entry;
3611         }
3612         
3613         /*
3614          * Handle submaps.
3615          */
3616         if (entry->maptype == VM_MAPTYPE_SUBMAP) {
3617                 vm_map_t old_map = map;
3618
3619                 *var_map = map = entry->object.sub_map;
3620                 if (use_read_lock)
3621                         vm_map_unlock_read(old_map);
3622                 else
3623                         vm_map_unlock(old_map);
3624                 use_read_lock = 1;
3625                 goto RetryLookup;
3626         }
3627
3628         /*
3629          * Check whether this task is allowed to have this page.
3630          * Note the special case for MAP_ENTRY_COW
3631          * pages with an override.  This is to implement a forced
3632          * COW for debuggers.
3633          */
3634
3635         if (fault_type & VM_PROT_OVERRIDE_WRITE)
3636                 prot = entry->max_protection;
3637         else
3638                 prot = entry->protection;
3639
3640         fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
3641         if ((fault_type & prot) != fault_type) {
3642                 rv = KERN_PROTECTION_FAILURE;
3643                 goto done;
3644         }
3645
3646         if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
3647             (entry->eflags & MAP_ENTRY_COW) &&
3648             (fault_type & VM_PROT_WRITE) &&
3649             (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) {
3650                 rv = KERN_PROTECTION_FAILURE;
3651                 goto done;
3652         }
3653
3654         /*
3655          * If this page is not pageable, we have to get it for all possible
3656          * accesses.
3657          */
3658         *wired = (entry->wired_count != 0);
3659         if (*wired)
3660                 prot = fault_type = entry->protection;
3661
3662         /*
3663          * Virtual page tables may need to update the accessed (A) bit
3664          * in a page table entry.  Upgrade the fault to a write fault for
3665          * that case if the map will support it.  If the map does not support
3666          * it the page table entry simply will not be updated.
3667          */
3668         if (entry->maptype == VM_MAPTYPE_VPAGETABLE) {
3669                 if (prot & VM_PROT_WRITE)
3670                         fault_type |= VM_PROT_WRITE;
3671         }
3672
3673         /*
3674          * If the entry was copy-on-write, we either ...
3675          */
3676         if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
3677                 /*
3678                  * If we want to write the page, we may as well handle that
3679                  * now since we've got the map locked.
3680                  *
3681                  * If we don't need to write the page, we just demote the
3682                  * permissions allowed.
3683                  */
3684
3685                 if (fault_type & VM_PROT_WRITE) {
3686                         /*
3687                          * Make a new object, and place it in the object
3688                          * chain.  Note that no new references have appeared
3689                          * -- one just moved from the map to the new
3690                          * object.
3691                          */
3692
3693                         if (use_read_lock && vm_map_lock_upgrade(map)) {
3694                                 use_read_lock = 0;
3695                                 goto RetryLookup;
3696                         }
3697                         use_read_lock = 0;
3698
3699                         vm_map_entry_shadow(entry);
3700                 } else {
3701                         /*
3702                          * We're attempting to read a copy-on-write page --
3703                          * don't allow writes.
3704                          */
3705
3706                         prot &= ~VM_PROT_WRITE;
3707                 }
3708         }
3709
3710         /*
3711          * Create an object if necessary.
3712          */
3713         if (entry->object.vm_object == NULL &&
3714             !map->system_map) {
3715                 if (use_read_lock && vm_map_lock_upgrade(map))  {
3716                         use_read_lock = 0;
3717                         goto RetryLookup;
3718                 }
3719                 use_read_lock = 0;
3720                 vm_map_entry_allocate_object(entry);
3721         }
3722
3723         /*
3724          * Return the object/offset from this entry.  If the entry was
3725          * copy-on-write or empty, it has been fixed up.
3726          */
3727
3728         *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
3729         *object = entry->object.vm_object;
3730
3731         /*
3732          * Return whether this is the only map sharing this data.  On
3733          * success we return with a read lock held on the map.  On failure
3734          * we return with the map unlocked.
3735          */
3736         *out_prot = prot;
3737 done:
3738         if (rv == KERN_SUCCESS) {
3739                 if (use_read_lock == 0)
3740                         vm_map_lock_downgrade(map);
3741         } else if (use_read_lock) {
3742                 vm_map_unlock_read(map);
3743         } else {
3744                 vm_map_unlock(map);
3745         }
3746         return (rv);
3747 }
3748
3749 /*
3750  * Releases locks acquired by a vm_map_lookup()
3751  * (according to the handle returned by that lookup).
3752  *
3753  * No other requirements.
3754  */
3755 void
3756 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry, int count)
3757 {
3758         /*
3759          * Unlock the main-level map
3760          */
3761         vm_map_unlock_read(map);
3762         if (count)
3763                 vm_map_entry_release(count);
3764 }
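/*
 * Usage sketch (illustrative only; names mirror the out parameters above):
 * vm_map_lookup() and vm_map_lookup_done() bracket a fault-style lookup,
 * and the object/pindex are only valid while the returned read lock is
 * held:
 *
 *      rv = vm_map_lookup(&map, va, VM_PROT_READ, &entry, &object,
 *                         &pindex, &prot, &wired);
 *      if (rv == KERN_SUCCESS) {
 *              ... use object and pindex ...
 *              vm_map_lookup_done(map, entry, 0);
 *      }
 */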
3765
3766 #include "opt_ddb.h"
3767 #ifdef DDB
3768 #include <sys/kernel.h>
3769
3770 #include <ddb/ddb.h>
3771
3772 /*
3773  * Debugging only
3774  */
3775 DB_SHOW_COMMAND(map, vm_map_print)
3776 {
3777         static int nlines;
3778         /* XXX convert args. */
3779         vm_map_t map = (vm_map_t)addr;
3780         boolean_t full = have_addr;
3781
3782         vm_map_entry_t entry;
3783
3784         db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
3785             (void *)map,
3786             (void *)map->pmap, map->nentries, map->timestamp);
3787         nlines++;
3788
3789         if (!full && db_indent)
3790                 return;
3791
3792         db_indent += 2;
3793         for (entry = map->header.next; entry != &map->header;
3794             entry = entry->next) {
3795                 db_iprintf("map entry %p: start=%p, end=%p\n",
3796                     (void *)entry, (void *)entry->start, (void *)entry->end);
3797                 nlines++;
3798                 {
3799                         static char *inheritance_name[4] =
3800                         {"share", "copy", "none", "donate_copy"};
3801
3802                         db_iprintf(" prot=%x/%x/%s",
3803                             entry->protection,
3804                             entry->max_protection,
3805                             inheritance_name[(int)(unsigned char)entry->inheritance]);
3806                         if (entry->wired_count != 0)
3807                                 db_printf(", wired");
3808                 }
3809                 if (entry->maptype == VM_MAPTYPE_SUBMAP) {
3810                         /* XXX no %qd in kernel.  Truncate entry->offset. */
3811                         db_printf(", share=%p, offset=0x%lx\n",
3812                             (void *)entry->object.sub_map,
3813                             (long)entry->offset);
3814                         nlines++;
3815                         if ((entry->prev == &map->header) ||
3816                             (entry->prev->object.sub_map !=
3817                                 entry->object.sub_map)) {
3818                                 db_indent += 2;
3819                                 vm_map_print((db_expr_t)(intptr_t)
3820                                              entry->object.sub_map,
3821                                              full, 0, NULL);
3822                                 db_indent -= 2;
3823                         }
3824                 } else {
3825                         /* XXX no %qd in kernel.  Truncate entry->offset. */
3826                         db_printf(", object=%p, offset=0x%lx",
3827                             (void *)entry->object.vm_object,
3828                             (long)entry->offset);
3829                         if (entry->eflags & MAP_ENTRY_COW)
3830                                 db_printf(", copy (%s)",
3831                                     (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
3832                         db_printf("\n");
3833                         nlines++;
3834
3835                         if ((entry->prev == &map->header) ||
3836                             (entry->prev->object.vm_object !=
3837                                 entry->object.vm_object)) {
3838                                 db_indent += 2;
3839                                 vm_object_print((db_expr_t)(intptr_t)
3840                                                 entry->object.vm_object,
3841                                                 full, 0, NULL);
3842                                 nlines += 4;
3843                                 db_indent -= 2;
3844                         }
3845                 }
3846         }
3847         db_indent -= 2;
3848         if (db_indent == 0)
3849                 nlines = 0;
3850 }
3851
3852 /*
3853  * Debugging only
3854  */
3855 DB_SHOW_COMMAND(procvm, procvm)
3856 {
3857         struct proc *p;
3858
3859         if (have_addr) {
3860                 p = (struct proc *) addr;
3861         } else {
3862                 p = curproc;
3863         }
3864
3865         db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
3866             (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
3867             (void *)vmspace_pmap(p->p_vmspace));
3868
3869         vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL);
3870 }
3871
3872 #endif /* DDB */