/*
 * Copyright (c) 2003-2014 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * ---
 *
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * ---
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *      Page fault handling module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <sys/vkernel.h>
#include <sys/lock.h>
#include <sys/sysctl.h>

#include <cpu/lwbuf.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>
#include <vm/vm_page2.h>

struct faultstate {
        vm_page_t m;
        vm_object_t object;
        vm_pindex_t pindex;
        vm_prot_t prot;
        vm_page_t first_m;
        vm_object_t first_object;
        vm_prot_t first_prot;
        vm_map_t map;
        vm_map_entry_t entry;
        int lookup_still_valid;
        int hardfault;
        int fault_flags;
        int map_generation;
        int shared;
        int first_shared;
        boolean_t wired;
        struct vnode *vp;
};
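
/*
 * NOTE (added comment, summary inferred from the code below rather than
 * taken from the original source): fs.shared and fs.first_shared track
 * whether the terminal and top-level object locks are held shared (1)
 * or exclusive (0), fs.lookup_still_valid tracks whether fs.map is still
 * read-locked, and fs.first_m holds the junk page allocated ahead of a
 * possible COW.
 */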

static int debug_fault = 0;
SYSCTL_INT(_vm, OID_AUTO, debug_fault, CTLFLAG_RW, &debug_fault, 0, "");
static int debug_cluster = 0;
SYSCTL_INT(_vm, OID_AUTO, debug_cluster, CTLFLAG_RW, &debug_cluster, 0, "");
int vm_shared_fault = 1;
TUNABLE_INT("vm.shared_fault", &vm_shared_fault);
SYSCTL_INT(_vm, OID_AUTO, shared_fault, CTLFLAG_RW, &vm_shared_fault, 0,
           "Allow shared token on vm_object");
static long vm_shared_hit = 0;
SYSCTL_LONG(_vm, OID_AUTO, shared_hit, CTLFLAG_RW, &vm_shared_hit, 0,
           "Successful shared faults");
static long vm_shared_count = 0;
SYSCTL_LONG(_vm, OID_AUTO, shared_count, CTLFLAG_RW, &vm_shared_count, 0,
           "Shared fault attempts");
static long vm_shared_miss = 0;
SYSCTL_LONG(_vm, OID_AUTO, shared_miss, CTLFLAG_RW, &vm_shared_miss, 0,
           "Unsuccessful shared faults");

static int vm_fault_object(struct faultstate *, vm_pindex_t, vm_prot_t, int);
static int vm_fault_vpagetable(struct faultstate *, vm_pindex_t *,
                        vpte_t, int, int);
#if 0
static int vm_fault_additional_pages (vm_page_t, int, int, vm_page_t *, int *);
#endif
static void vm_set_nosync(vm_page_t m, vm_map_entry_t entry);
static void vm_prefault(pmap_t pmap, vm_offset_t addra,
                        vm_map_entry_t entry, int prot, int fault_flags);
static void vm_prefault_quick(pmap_t pmap, vm_offset_t addra,
                        vm_map_entry_t entry, int prot, int fault_flags);

static __inline void
release_page(struct faultstate *fs)
{
        vm_page_deactivate(fs->m);
        vm_page_wakeup(fs->m);
        fs->m = NULL;
}

/*
 * NOTE: Once unlocked any cached fs->entry becomes invalid, any reuse
 *       requires relocking and then checking the timestamp.
 *
 * NOTE: vm_map_lock_read() does not bump fs->map->timestamp so we do
 *       not have to update fs->map_generation here.
 *
 * NOTE: This function can fail due to a deadlock against the caller's
 *       holding of a vm_page BUSY.
 */
static __inline int
relock_map(struct faultstate *fs)
{
        int error;

        if (fs->lookup_still_valid == FALSE && fs->map) {
                error = vm_map_lock_read_to(fs->map);
                if (error == 0)
                        fs->lookup_still_valid = TRUE;
        } else {
                error = 0;
        }
        return error;
}
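
/*
 * Illustrative note (added comment, not from the original source): per the
 * NOTE above, a caller holding a busied vm_page treats a relock_map()
 * failure as a deadlock indication and typically backs the fault out and
 * retries rather than blocking on the map lock.
 */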

static __inline void
unlock_map(struct faultstate *fs)
{
        if (fs->lookup_still_valid && fs->map) {
                vm_map_lookup_done(fs->map, fs->entry, 0);
                fs->lookup_still_valid = FALSE;
        }
}

/*
 * Clean up after a successful call to vm_fault_object() so another call
 * to vm_fault_object() can be made.
 */
static void
_cleanup_successful_fault(struct faultstate *fs, int relock)
{
        /*
         * We allocated a junk page for a COW operation that did
         * not occur, the page must be freed.
         */
        if (fs->object != fs->first_object) {
                KKASSERT(fs->first_shared == 0);
                vm_page_free(fs->first_m);
                vm_object_pip_wakeup(fs->object);
                fs->first_m = NULL;
        }

        /*
         * Reset fs->object.
         */
        fs->object = fs->first_object;
        if (relock && fs->lookup_still_valid == FALSE) {
                if (fs->map)
                        vm_map_lock_read(fs->map);
                fs->lookup_still_valid = TRUE;
        }
}

static void
_unlock_things(struct faultstate *fs, int dealloc)
{
        _cleanup_successful_fault(fs, 0);
        if (dealloc) {
                /*vm_object_deallocate(fs->first_object);*/
                /*fs->first_object = NULL; drop used later on */
        }
        unlock_map(fs);
        if (fs->vp != NULL) {
                vput(fs->vp);
                fs->vp = NULL;
        }
}

#define unlock_things(fs) _unlock_things(fs, 0)
#define unlock_and_deallocate(fs) _unlock_things(fs, 1)
#define cleanup_successful_fault(fs) _cleanup_successful_fault(fs, 1)

/*
 * TRYPAGER
 *
 * Determine if the pager for the current object *might* contain the page.
 *
 * We only need to try the pager if this is not a default object (default
 * objects are zero-fill and have no real pager), and if we are not taking
 * a wiring fault or if the FS entry is wired.
 */
#define TRYPAGER(fs)    \
                (fs->object->type != OBJT_DEFAULT && \
                (((fs->fault_flags & VM_FAULT_WIRE_MASK) == 0) || fs->wired))
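
/*
 * Illustrative note (added comment, not from the original source): for a
 * fresh anonymous mapping the terminal object is OBJT_DEFAULT, so
 * TRYPAGER() is false and the fault code zero-fills a new page instead
 * of calling the pager.  For a file mmap the object is typically
 * OBJT_VNODE and TRYPAGER() is true, so the vnode pager pages the data in.
 */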

/*
 * vm_fault:
 *
 * Handle a page fault occurring at the given address, requiring the given
 * permissions, in the map specified.  If successful, the page is inserted
 * into the associated physical map.
 *
 * NOTE: The given address should be truncated to the proper page address.
 *
 * KERN_SUCCESS is returned if the page fault is handled; otherwise,
 * a standard error specifying why the fault is fatal is returned.
 *
 * The map in question must be referenced, and remains so.
 * The caller may hold no locks.
 * No other requirements.
 */
int
vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type, int fault_flags)
{
        int result;
        vm_pindex_t first_pindex;
        struct faultstate fs;
        struct lwp *lp;
        int growstack;
        int retry = 0;
        int inherit_prot;

        inherit_prot = fault_type & VM_PROT_NOSYNC;
        vm_page_pcpu_cache();
        fs.hardfault = 0;
        fs.fault_flags = fault_flags;
        fs.vp = NULL;
        fs.shared = vm_shared_fault;
        fs.first_shared = vm_shared_fault;
        growstack = 1;
        if (vm_shared_fault)
                ++vm_shared_count;

        /*
         * vm_map interactions
         */
        if ((lp = curthread->td_lwp) != NULL)
                lp->lwp_flags |= LWP_PAGING;
        lwkt_gettoken(&map->token);

RetryFault:
        /*
         * Find the vm_map_entry representing the backing store and resolve
         * the top level object and page index.  This may have the side
         * effect of executing a copy-on-write on the map entry and/or
         * creating a shadow object, but will not COW any actual VM pages.
         *
         * On success fs.map is left read-locked and various other fields
         * are initialized but not otherwise referenced or locked.
         *
         * NOTE!  vm_map_lookup will try to upgrade the fault_type to
         * VM_FAULT_WRITE if the map entry is a virtual page table and also
         * writable, so we can set the 'A' (accessed) bit in the virtual
         * page table entry.
         */
        fs.map = map;
        result = vm_map_lookup(&fs.map, vaddr, fault_type,
                               &fs.entry, &fs.first_object,
                               &first_pindex, &fs.first_prot, &fs.wired);

        /*
         * If the lookup failed or the map protections are incompatible,
         * the fault generally fails.
         *
         * The failure could be due to TDF_NOFAULT if vm_map_lookup()
         * tried to do a COW fault.
         *
         * If the caller is trying to do a user wiring we have more work
         * to do.
         */
        if (result != KERN_SUCCESS) {
                if (result == KERN_FAILURE_NOFAULT) {
                        result = KERN_FAILURE;
                        goto done;
                }
                if (result != KERN_PROTECTION_FAILURE ||
                    (fs.fault_flags & VM_FAULT_WIRE_MASK) != VM_FAULT_USER_WIRE)
                {
                        if (result == KERN_INVALID_ADDRESS && growstack &&
                            map != &kernel_map && curproc != NULL) {
                                result = vm_map_growstack(curproc, vaddr);
                                if (result == KERN_SUCCESS) {
                                        growstack = 0;
                                        ++retry;
                                        goto RetryFault;
                                }
                                result = KERN_FAILURE;
                        }
                        goto done;
                }

                /*
                 * If we are user-wiring a r/w segment, and it is COW, then
                 * we need to do the COW operation.  Note that we don't
                 * currently COW RO sections now, because it is NOT desirable
                 * to COW .text.  We simply keep .text from ever being COW'ed
                 * and take the heat that one cannot debug wired .text sections.
                 */
                result = vm_map_lookup(&fs.map, vaddr,
                                       VM_PROT_READ|VM_PROT_WRITE|
                                        VM_PROT_OVERRIDE_WRITE,
                                       &fs.entry, &fs.first_object,
                                       &first_pindex, &fs.first_prot,
                                       &fs.wired);
                if (result != KERN_SUCCESS) {
                        /* could also be KERN_FAILURE_NOFAULT */
                        result = KERN_FAILURE;
                        goto done;
                }

                /*
                 * If we don't COW now, on a user wire, the user will never
                 * be able to write to the mapping.  If we don't make this
                 * restriction, the bookkeeping would be nearly impossible.
                 *
                 * XXX We have a shared lock, this will have a MP race but
                 * I don't see how it can hurt anything.
                 */
                if ((fs.entry->protection & VM_PROT_WRITE) == 0)
                        fs.entry->max_protection &= ~VM_PROT_WRITE;
        }

        /*
         * fs.map is read-locked
         *
         * Misc checks.  Save the map generation number to detect races.
         */
        fs.map_generation = fs.map->timestamp;
        fs.lookup_still_valid = TRUE;
        fs.first_m = NULL;
        fs.object = fs.first_object;    /* so unlock_and_deallocate works */
        fs.prot = fs.first_prot;        /* default (used by uksmap) */

        if (fs.entry->eflags & (MAP_ENTRY_NOFAULT | MAP_ENTRY_KSTACK)) {
                if (fs.entry->eflags & MAP_ENTRY_NOFAULT) {
                        panic("vm_fault: fault on nofault entry, addr: %p",
                              (void *)vaddr);
                }
                if ((fs.entry->eflags & MAP_ENTRY_KSTACK) &&
                    vaddr >= fs.entry->start &&
                    vaddr < fs.entry->start + PAGE_SIZE) {
                        panic("vm_fault: fault on stack guard, addr: %p",
                              (void *)vaddr);
                }
        }

        /*
         * A user-kernel shared map has no VM object and bypasses
         * everything.  We execute the uksmap function with a temporary
         * fictitious vm_page.  The address is directly mapped with no
         * management.
         */
        if (fs.entry->maptype == VM_MAPTYPE_UKSMAP) {
                struct vm_page fakem;

                bzero(&fakem, sizeof(fakem));
                fakem.pindex = first_pindex;
                fakem.flags = PG_BUSY | PG_FICTITIOUS | PG_UNMANAGED;
                fakem.valid = VM_PAGE_BITS_ALL;
                fakem.pat_mode = VM_MEMATTR_DEFAULT;
                if (fs.entry->object.uksmap(fs.entry->aux.dev, &fakem)) {
                        result = KERN_FAILURE;
                        unlock_things(&fs);
                        goto done2;
                }
                pmap_enter(fs.map->pmap, vaddr, &fakem, fs.prot | inherit_prot,
                           fs.wired, fs.entry);
                goto done_success;
        }

        /*
         * A system map entry may return a NULL object.  No object means
         * no pager means an unrecoverable kernel fault.
         */
        if (fs.first_object == NULL) {
                panic("vm_fault: unrecoverable fault at %p in entry %p",
                        (void *)vaddr, fs.entry);
        }

        /*
         * Fail here if not a trivial anonymous page fault and TDF_NOFAULT
         * is set.
         */
        if ((curthread->td_flags & TDF_NOFAULT) &&
            (retry ||
             fs.first_object->type == OBJT_VNODE ||
             fs.first_object->backing_object)) {
                result = KERN_FAILURE;
                unlock_things(&fs);
                goto done2;
        }

        /*
         * If the entry is wired we cannot change the page protection.
         */
        if (fs.wired)
                fault_type = fs.first_prot;

        /*
         * We generally want to avoid unnecessary exclusive modes on backing
         * and terminal objects because this can seriously interfere with
         * heavily fork()'d processes (particularly /bin/sh scripts).
         *
         * However, we also want to avoid unnecessary retries due to needed
         * shared->exclusive promotion for common faults.  Exclusive mode is
         * always needed if any page insertion, rename, or free occurs in an
         * object (and also indirectly if any I/O is done).
         *
         * The main issue here is going to be fs.first_shared.  If the
         * first_object has a backing object which isn't shadowed and the
         * process is single-threaded we might as well use an exclusive
         * lock/chain right off the bat.
         */
        if (fs.first_shared && fs.first_object->backing_object &&
            LIST_EMPTY(&fs.first_object->shadow_head) &&
            curthread->td_proc && curthread->td_proc->p_nthreads == 1) {
                fs.first_shared = 0;
        }

        /*
         * swap_pager_unswapped() needs an exclusive object
         */
        if (fault_flags & (VM_FAULT_UNSWAP | VM_FAULT_DIRTY)) {
                fs.first_shared = 0;
        }

        /*
         * Obtain a top-level object lock, shared or exclusive depending
         * on fs.first_shared.  If a shared lock winds up being insufficient
         * we will retry with an exclusive lock.
         *
         * The vnode pager lock is always shared.
         */
        if (fs.first_shared)
                vm_object_hold_shared(fs.first_object);
        else
                vm_object_hold(fs.first_object);
        if (fs.vp == NULL)
                fs.vp = vnode_pager_lock(fs.first_object);

        /*
         * The page we want is at (first_object, first_pindex), but if the
         * vm_map_entry is VM_MAPTYPE_VPAGETABLE we have to traverse the
         * page table to figure out the actual pindex.
         *
         * NOTE!  DEVELOPMENT IN PROGRESS, THIS IS AN INITIAL IMPLEMENTATION
         * ONLY
         */
        if (fs.entry->maptype == VM_MAPTYPE_VPAGETABLE) {
                result = vm_fault_vpagetable(&fs, &first_pindex,
                                             fs.entry->aux.master_pde,
                                             fault_type, 1);
                if (result == KERN_TRY_AGAIN) {
                        vm_object_drop(fs.first_object);
                        ++retry;
                        goto RetryFault;
                }
                if (result != KERN_SUCCESS)
                        goto done;
        }

        /*
         * Now we have the actual (object, pindex), fault in the page.  If
         * vm_fault_object() fails it will unlock and deallocate the FS
         * data.   If it succeeds everything remains locked and fs->object
         * will have an additional PIP count if it is not equal to
         * fs->first_object
         *
         * vm_fault_object will set fs->prot for the pmap operation.  It is
         * allowed to set VM_PROT_WRITE if fault_type == VM_PROT_READ if the
         * page can be safely written.  However, it will force a read-only
         * mapping for a read fault if the memory is managed by a virtual
         * page table.
         *
         * If the fault code uses the shared object lock shortcut
         * we must not try to burst (we can't allocate VM pages).
         */
        result = vm_fault_object(&fs, first_pindex, fault_type, 1);

        if (debug_fault > 0) {
                --debug_fault;
                kprintf("VM_FAULT result %d addr=%jx type=%02x flags=%02x "
                        "fs.m=%p fs.prot=%02x fs.wired=%02x fs.entry=%p\n",
                        result, (intmax_t)vaddr, fault_type, fault_flags,
                        fs.m, fs.prot, fs.wired, fs.entry);
        }

        if (result == KERN_TRY_AGAIN) {
                vm_object_drop(fs.first_object);
                ++retry;
                goto RetryFault;
        }
        if (result != KERN_SUCCESS)
                goto done;

        /*
         * On success vm_fault_object() does not unlock or deallocate, and fs.m
         * will contain a busied page.
         *
         * Enter the page into the pmap and do pmap-related adjustments.
         */
        KKASSERT(fs.lookup_still_valid == TRUE);
        vm_page_flag_set(fs.m, PG_REFERENCED);
        pmap_enter(fs.map->pmap, vaddr, fs.m, fs.prot | inherit_prot,
                   fs.wired, fs.entry);

        /*KKASSERT(fs.m->queue == PQ_NONE); page-in op may deactivate page */
        KKASSERT(fs.m->flags & PG_BUSY);

        /*
         * If the page is not wired down, then put it where the pageout daemon
         * can find it.
         */
        if (fs.fault_flags & VM_FAULT_WIRE_MASK) {
                if (fs.wired)
                        vm_page_wire(fs.m);
                else
                        vm_page_unwire(fs.m, 1);
        } else {
                vm_page_activate(fs.m);
        }
        vm_page_wakeup(fs.m);

        /*
         * Burst in a few more pages if possible.  The fs.map should still
         * be locked.  To avoid interlocking against a vnode->getblk
         * operation we had to be sure to unbusy our primary vm_page above
         * first.
         *
         * A normal burst can continue down the backing store but is only
         * executed if we are holding an exclusive lock, otherwise the
         * exclusive locks the burst code acquires might cause excessive
         * SMP collisions.
         *
         * A quick burst can be utilized when there is no backing object
         * (i.e. a shared file mmap).
         */
        if ((fault_flags & VM_FAULT_BURST) &&
            (fs.fault_flags & VM_FAULT_WIRE_MASK) == 0 &&
            fs.wired == 0) {
                if (fs.first_shared == 0 && fs.shared == 0) {
                        vm_prefault(fs.map->pmap, vaddr,
                                    fs.entry, fs.prot, fault_flags);
                } else {
                        vm_prefault_quick(fs.map->pmap, vaddr,
                                          fs.entry, fs.prot, fault_flags);
                }
        }

done_success:
        mycpu->gd_cnt.v_vm_faults++;
        if (curthread->td_lwp)
                ++curthread->td_lwp->lwp_ru.ru_minflt;

        /*
         * Unlock everything, and return
         */
        unlock_things(&fs);

        if (curthread->td_lwp) {
                if (fs.hardfault) {
                        curthread->td_lwp->lwp_ru.ru_majflt++;
                } else {
                        curthread->td_lwp->lwp_ru.ru_minflt++;
                }
        }

        /*vm_object_deallocate(fs.first_object);*/
        /*fs.m = NULL; */
        /*fs.first_object = NULL; must still drop later */

        result = KERN_SUCCESS;
done:
        if (fs.first_object)
                vm_object_drop(fs.first_object);
done2:
        lwkt_reltoken(&map->token);
        if (lp)
                lp->lwp_flags &= ~LWP_PAGING;
        if (vm_shared_fault && fs.shared == 0)
                ++vm_shared_miss;
        return (result);
}
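
/*
 * Illustrative usage (added comment, not from the original source): the
 * machine-dependent trap code typically resolves a user page fault with
 * something like
 *
 *      rv = vm_fault(&vm->vm_map, trunc_page(va), ftype, VM_FAULT_NORMAL);
 *
 * where ftype is VM_PROT_READ or VM_PROT_WRITE derived from the trap
 * frame; the variable names here are hypothetical.
 */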

/*
 * Fault in the specified virtual address in the current process map,
 * returning a held VM page or NULL.  See vm_fault_page() for more
 * information.
 *
 * No requirements.
 */
vm_page_t
vm_fault_page_quick(vm_offset_t va, vm_prot_t fault_type, int *errorp)
{
        struct lwp *lp = curthread->td_lwp;
        vm_page_t m;

        m = vm_fault_page(&lp->lwp_vmspace->vm_map, va,
                          fault_type, VM_FAULT_NORMAL, errorp);
        return(m);
}

/*
 * Fault in the specified virtual address in the specified map, doing all
 * necessary manipulation of the object store and all necessary I/O.  Return
 * a held VM page or NULL, and set *errorp.  The related pmap is not
 * updated.
 *
 * The returned page will be properly dirtied if VM_PROT_WRITE was specified,
 * and marked PG_REFERENCED as well.
 *
 * If the page cannot be faulted writable and VM_PROT_WRITE was specified, an
 * error will be returned.
 *
 * No requirements.
 */
vm_page_t
vm_fault_page(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
              int fault_flags, int *errorp)
{
        vm_pindex_t first_pindex;
        struct faultstate fs;
        int result;
        int retry = 0;
        vm_prot_t orig_fault_type = fault_type;

        fs.hardfault = 0;
        fs.fault_flags = fault_flags;
        KKASSERT((fault_flags & VM_FAULT_WIRE_MASK) == 0);

        /*
         * Dive the pmap (concurrency possible).  If we find the
         * appropriate page we can terminate early and quickly.
         */
        fs.m = pmap_fault_page_quick(map->pmap, vaddr, fault_type);
        if (fs.m) {
                *errorp = 0;
                return(fs.m);
        }

        /*
         * Otherwise take a concurrency hit and do a formal page
         * fault.
         */
        fs.shared = vm_shared_fault;
        fs.first_shared = vm_shared_fault;
        fs.vp = NULL;
        lwkt_gettoken(&map->token);

        /*
         * swap_pager_unswapped() needs an exclusive object
         */
        if (fault_flags & (VM_FAULT_UNSWAP | VM_FAULT_DIRTY)) {
                fs.first_shared = 0;
        }

RetryFault:
        /*
         * Find the vm_map_entry representing the backing store and resolve
         * the top level object and page index.  This may have the side
         * effect of executing a copy-on-write on the map entry and/or
         * creating a shadow object, but will not COW any actual VM pages.
         *
         * On success fs.map is left read-locked and various other fields
         * are initialized but not otherwise referenced or locked.
         *
         * NOTE!  vm_map_lookup will upgrade the fault_type to VM_FAULT_WRITE
         * if the map entry is a virtual page table and also writable,
         * so we can set the 'A' (accessed) bit in the virtual page table
         * entry.
         */
        fs.map = map;
        result = vm_map_lookup(&fs.map, vaddr, fault_type,
                               &fs.entry, &fs.first_object,
                               &first_pindex, &fs.first_prot, &fs.wired);

        if (result != KERN_SUCCESS) {
                *errorp = result;
                fs.m = NULL;
                goto done;
        }

        /*
         * fs.map is read-locked
         *
         * Misc checks.  Save the map generation number to detect races.
         */
        fs.map_generation = fs.map->timestamp;
        fs.lookup_still_valid = TRUE;
        fs.first_m = NULL;
        fs.object = fs.first_object;    /* so unlock_and_deallocate works */

        if (fs.entry->eflags & MAP_ENTRY_NOFAULT) {
                panic("vm_fault: fault on nofault entry, addr: %lx",
                    (u_long)vaddr);
        }

        /*
         * A user-kernel shared map has no VM object and bypasses
         * everything.  We execute the uksmap function with a temporary
         * fictitious vm_page.  The address is directly mapped with no
         * management.
         */
        if (fs.entry->maptype == VM_MAPTYPE_UKSMAP) {
                struct vm_page fakem;

                bzero(&fakem, sizeof(fakem));
                fakem.pindex = first_pindex;
                fakem.flags = PG_BUSY | PG_FICTITIOUS | PG_UNMANAGED;
                fakem.valid = VM_PAGE_BITS_ALL;
                fakem.pat_mode = VM_MEMATTR_DEFAULT;
                if (fs.entry->object.uksmap(fs.entry->aux.dev, &fakem)) {
                        *errorp = KERN_FAILURE;
                        fs.m = NULL;
                        unlock_things(&fs);
                        goto done2;
                }
                fs.m = PHYS_TO_VM_PAGE(fakem.phys_addr);
                vm_page_hold(fs.m);

                unlock_things(&fs);
                *errorp = 0;
                goto done;
        }


        /*
         * A system map entry may return a NULL object.  No object means
         * no pager means an unrecoverable kernel fault.
         */
        if (fs.first_object == NULL) {
                panic("vm_fault: unrecoverable fault at %p in entry %p",
                        (void *)vaddr, fs.entry);
        }

        /*
         * Fail here if not a trivial anonymous page fault and TDF_NOFAULT
         * is set.
         */
        if ((curthread->td_flags & TDF_NOFAULT) &&
            (retry ||
             fs.first_object->type == OBJT_VNODE ||
             fs.first_object->backing_object)) {
                *errorp = KERN_FAILURE;
                unlock_things(&fs);
                goto done2;
        }

        /*
         * If the entry is wired we cannot change the page protection.
         */
        if (fs.wired)
                fault_type = fs.first_prot;

        /*
         * Make a reference to this object to prevent its disposal while we
         * are messing with it.  Once we have the reference, the map is free
         * to be diddled.  Since objects reference their shadows (and copies),
         * they will stay around as well.
         *
         * The reference should also prevent an unexpected collapse of the
         * parent that might move pages from the current object into the
         * parent unexpectedly, resulting in corruption.
         *
         * Bump the paging-in-progress count to prevent size changes (e.g.
         * truncation operations) during I/O.  This must be done after
         * obtaining the vnode lock in order to avoid possible deadlocks.
         */
        if (fs.first_shared)
                vm_object_hold_shared(fs.first_object);
        else
                vm_object_hold(fs.first_object);
        if (fs.vp == NULL)
                fs.vp = vnode_pager_lock(fs.first_object);      /* shared */

        /*
         * The page we want is at (first_object, first_pindex), but if the
         * vm_map_entry is VM_MAPTYPE_VPAGETABLE we have to traverse the
         * page table to figure out the actual pindex.
         *
         * NOTE!  DEVELOPMENT IN PROGRESS, THIS IS AN INITIAL IMPLEMENTATION
         * ONLY
         */
        if (fs.entry->maptype == VM_MAPTYPE_VPAGETABLE) {
                result = vm_fault_vpagetable(&fs, &first_pindex,
                                             fs.entry->aux.master_pde,
                                             fault_type, 1);
                if (result == KERN_TRY_AGAIN) {
                        vm_object_drop(fs.first_object);
                        ++retry;
                        goto RetryFault;
                }
                if (result != KERN_SUCCESS) {
                        *errorp = result;
                        fs.m = NULL;
                        goto done;
                }
        }

        /*
         * Now we have the actual (object, pindex), fault in the page.  If
         * vm_fault_object() fails it will unlock and deallocate the FS
         * data.   If it succeeds everything remains locked and fs->object
         * will have an additional PIP count if it is not equal to
         * fs->first_object
         */
        fs.m = NULL;
        result = vm_fault_object(&fs, first_pindex, fault_type, 1);

        if (result == KERN_TRY_AGAIN) {
                vm_object_drop(fs.first_object);
                ++retry;
                goto RetryFault;
        }
        if (result != KERN_SUCCESS) {
                *errorp = result;
                fs.m = NULL;
                goto done;
        }

        if ((orig_fault_type & VM_PROT_WRITE) &&
            (fs.prot & VM_PROT_WRITE) == 0) {
                *errorp = KERN_PROTECTION_FAILURE;
                unlock_and_deallocate(&fs);
                fs.m = NULL;
                goto done;
        }

        /*
         * DO NOT UPDATE THE PMAP!!!  This function may be called for
         * a pmap unrelated to the current process pmap, in which case
         * the current cpu core will not be listed in the pmap's pm_active
         * mask.  Thus invalidation interlocks will fail to work properly.
         *
         * (for example, 'ps' uses procfs to read program arguments from
         * each process's stack).
         *
         * In addition to the above this function will be called to acquire
         * a page that might already be faulted in, re-faulting it
         * continuously is a waste of time.
         *
         * XXX could this have been the cause of our random seg-fault
         *     issues?  procfs accesses user stacks.
         */
        vm_page_flag_set(fs.m, PG_REFERENCED);
#if 0
        pmap_enter(fs.map->pmap, vaddr, fs.m, fs.prot, fs.wired, NULL);
        mycpu->gd_cnt.v_vm_faults++;
        if (curthread->td_lwp)
                ++curthread->td_lwp->lwp_ru.ru_minflt;
#endif

        /*
         * On success vm_fault_object() does not unlock or deallocate, and fs.m
         * will contain a busied page.  So we must unlock here after having
         * messed with the pmap.
         */
        unlock_things(&fs);

        /*
         * Return a held page.  We are not doing any pmap manipulation so do
         * not set PG_MAPPED.  However, adjust the page flags according to
         * the fault type because the caller may not use a managed pmapping
         * (so we don't want to lose the fact that the page will be dirtied
         * if a write fault was specified).
         */
        vm_page_hold(fs.m);
        vm_page_activate(fs.m);
        if (fault_type & VM_PROT_WRITE)
                vm_page_dirty(fs.m);

        if (curthread->td_lwp) {
                if (fs.hardfault) {
                        curthread->td_lwp->lwp_ru.ru_majflt++;
                } else {
                        curthread->td_lwp->lwp_ru.ru_minflt++;
                }
        }

        /*
         * Unlock everything, and return the held page.
         */
        vm_page_wakeup(fs.m);
        /*vm_object_deallocate(fs.first_object);*/
        /*fs.first_object = NULL; */
        *errorp = 0;

done:
        if (fs.first_object)
                vm_object_drop(fs.first_object);
done2:
        lwkt_reltoken(&map->token);
        return(fs.m);
}
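
/*
 * Illustrative usage (added comment, not from the original source):
 * callers receive a held (not busied) page and are expected to drop
 * the hold with vm_page_unhold() when done with it, e.g.:
 *
 *      m = vm_fault_page(map, va, VM_PROT_READ, VM_FAULT_NORMAL, &error);
 *      if (m) {
 *              ... access the page contents via a temporary mapping ...
 *              vm_page_unhold(m);
 *      }
 */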

/*
 * Fault in the specified (object,offset), dirty the returned page as
 * needed.  If the requested fault_type cannot be satisfied, NULL is
 * returned and an error is set in *errorp.
 *
 * A held (but not busied) page is returned.
 *
 * The passed in object must be held as specified by the shared
 * argument.
 */
vm_page_t
vm_fault_object_page(vm_object_t object, vm_ooffset_t offset,
                     vm_prot_t fault_type, int fault_flags,
                     int *sharedp, int *errorp)
{
        int result;
        vm_pindex_t first_pindex;
        struct faultstate fs;
        struct vm_map_entry entry;

        ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
        bzero(&entry, sizeof(entry));
        entry.object.vm_object = object;
        entry.maptype = VM_MAPTYPE_NORMAL;
        entry.protection = entry.max_protection = fault_type;

        fs.hardfault = 0;
        fs.fault_flags = fault_flags;
        fs.map = NULL;
        fs.shared = vm_shared_fault;
        fs.first_shared = *sharedp;
        fs.vp = NULL;
        KKASSERT((fault_flags & VM_FAULT_WIRE_MASK) == 0);

        /*
         * Might require swap block adjustments
         */
        if (fs.first_shared && (fault_flags & (VM_FAULT_UNSWAP | VM_FAULT_DIRTY))) {
                fs.first_shared = 0;
                vm_object_upgrade(object);
        }

        /*
         * Retry loop as needed (typically for shared->exclusive transitions)
         */
RetryFault:
        *sharedp = fs.first_shared;
        first_pindex = OFF_TO_IDX(offset);
        fs.first_object = object;
        fs.entry = &entry;
        fs.first_prot = fault_type;
        fs.wired = 0;
        /*fs.map_generation = 0; unused */

        /*
         * Make a reference to this object to prevent its disposal while we
         * are messing with it.  Once we have the reference, the map is free
         * to be diddled.  Since objects reference their shadows (and copies),
         * they will stay around as well.
         *
         * The reference should also prevent an unexpected collapse of the
         * parent that might move pages from the current object into the
         * parent unexpectedly, resulting in corruption.
         *
         * Bump the paging-in-progress count to prevent size changes (e.g.
         * truncation operations) during I/O.  This must be done after
         * obtaining the vnode lock in order to avoid possible deadlocks.
         */
        if (fs.vp == NULL)
                fs.vp = vnode_pager_lock(fs.first_object);

        fs.lookup_still_valid = TRUE;
        fs.first_m = NULL;
        fs.object = fs.first_object;    /* so unlock_and_deallocate works */

#if 0
        /* XXX future - ability to operate on VM object using vpagetable */
        if (fs.entry->maptype == VM_MAPTYPE_VPAGETABLE) {
                result = vm_fault_vpagetable(&fs, &first_pindex,
                                             fs.entry->aux.master_pde,
                                             fault_type, 0);
                if (result == KERN_TRY_AGAIN) {
                        if (fs.first_shared == 0 && *sharedp)
                                vm_object_upgrade(object);
                        goto RetryFault;
                }
                if (result != KERN_SUCCESS) {
                        *errorp = result;
                        return (NULL);
                }
        }
#endif

        /*
         * Now we have the actual (object, pindex), fault in the page.  If
         * vm_fault_object() fails it will unlock and deallocate the FS
         * data.   If it succeeds everything remains locked and fs->object
         * will have an additional PIP count if it is not equal to
         * fs->first_object
         *
         * On KERN_TRY_AGAIN vm_fault_object() leaves fs.first_object intact.
         * We may have to upgrade its lock to handle the requested fault.
         */
        result = vm_fault_object(&fs, first_pindex, fault_type, 0);

        if (result == KERN_TRY_AGAIN) {
                if (fs.first_shared == 0 && *sharedp)
                        vm_object_upgrade(object);
                goto RetryFault;
        }
        if (result != KERN_SUCCESS) {
                *errorp = result;
                return(NULL);
        }

        if ((fault_type & VM_PROT_WRITE) && (fs.prot & VM_PROT_WRITE) == 0) {
                *errorp = KERN_PROTECTION_FAILURE;
                unlock_and_deallocate(&fs);
                return(NULL);
        }

        /*
         * On success vm_fault_object() does not unlock or deallocate, so we
         * do it here.  Note that the returned fs.m will be busied.
         */
        unlock_things(&fs);

        /*
         * Return a held page.  We are not doing any pmap manipulation so do
         * not set PG_MAPPED.  However, adjust the page flags according to
         * the fault type because the caller may not use a managed pmapping
         * (so we don't want to lose the fact that the page will be dirtied
         * if a write fault was specified).
         */
        vm_page_hold(fs.m);
        vm_page_activate(fs.m);
        if ((fault_type & VM_PROT_WRITE) || (fault_flags & VM_FAULT_DIRTY))
                vm_page_dirty(fs.m);
        if (fault_flags & VM_FAULT_UNSWAP)
                swap_pager_unswapped(fs.m);

        /*
         * Indicate that the page was accessed.
         */
        vm_page_flag_set(fs.m, PG_REFERENCED);

        if (curthread->td_lwp) {
                if (fs.hardfault) {
                        curthread->td_lwp->lwp_ru.ru_majflt++;
                } else {
                        curthread->td_lwp->lwp_ru.ru_minflt++;
                }
        }

        /*
         * Unlock everything, and return the held page.
         */
        vm_page_wakeup(fs.m);
        /*vm_object_deallocate(fs.first_object);*/
        /*fs.first_object = NULL; */

        *errorp = 0;
        return(fs.m);
}
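
/*
 * Illustrative usage (added comment, not from the original source): the
 * caller must already hold the object token as indicated by *sharedp,
 * e.g.:
 *
 *      shared = 1;
 *      vm_object_hold_shared(object);
 *      m = vm_fault_object_page(object, offset, VM_PROT_READ,
 *                               0, &shared, &error);
 *      ...
 *      vm_object_drop(object);
 *
 * On return *sharedp reflects whether the lock is still shared; the page,
 * if non-NULL, is held and is released with vm_page_unhold().
 */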

/*
 * Translate the virtual page number (first_pindex) that is relative
 * to the address space into a logical page number that is relative to the
 * backing object.  Use the virtual page table pointed to by (vpte).
 *
 * This implements an N-level page table.  Any level can terminate the
 * scan by setting VPTE_PS.   A linear mapping is accomplished by setting
 * VPTE_PS in the master page directory entry set via mcontrol(MADV_SETMAP).
 */
static
int
vm_fault_vpagetable(struct faultstate *fs, vm_pindex_t *pindex,
                    vpte_t vpte, int fault_type, int allow_nofault)
{
        struct lwbuf *lwb;
        struct lwbuf lwb_cache;
        int vshift = VPTE_FRAME_END - PAGE_SHIFT; /* index bits remaining */
        int result = KERN_SUCCESS;
        vpte_t *ptep;

        ASSERT_LWKT_TOKEN_HELD(vm_object_token(fs->first_object));
        for (;;) {
                /*
                 * We cannot proceed if the vpte is not valid, not readable
                 * for a read fault, or not writable for a write fault.
                 */
                if ((vpte & VPTE_V) == 0) {
                        unlock_and_deallocate(fs);
                        return (KERN_FAILURE);
                }
                if ((fault_type & VM_PROT_WRITE) && (vpte & VPTE_RW) == 0) {
                        unlock_and_deallocate(fs);
                        return (KERN_FAILURE);
                }
                if ((vpte & VPTE_PS) || vshift == 0)
                        break;
                KKASSERT(vshift >= VPTE_PAGE_BITS);

                /*
                 * Get the page table page.  Nominally we only read the page
                 * table, but since we are actively setting VPTE_M and VPTE_A,
                 * tell vm_fault_object() that we are writing it.
                 *
                 * There is currently no real need to optimize this.
                 */
                result = vm_fault_object(fs, (vpte & VPTE_FRAME) >> PAGE_SHIFT,
                                         VM_PROT_READ|VM_PROT_WRITE,
                                         allow_nofault);
                if (result != KERN_SUCCESS)
                        return (result);

                /*
                 * Process the returned fs.m and look up the page table
                 * entry in the page table page.
                 */
                vshift -= VPTE_PAGE_BITS;
                lwb = lwbuf_alloc(fs->m, &lwb_cache);
                ptep = ((vpte_t *)lwbuf_kva(lwb) +
                        ((*pindex >> vshift) & VPTE_PAGE_MASK));
                vpte = *ptep;

                /*
                 * Page table write-back.  If the vpte is valid for the
                 * requested operation, do a write-back to the page table.
                 *
                 * XXX VPTE_M is not set properly for page directory pages.
                 * It doesn't get set in the page directory if the page table
                 * is modified during a read access.
                 */
                vm_page_activate(fs->m);
                if ((fault_type & VM_PROT_WRITE) && (vpte & VPTE_V) &&
                    (vpte & VPTE_RW)) {
                        if ((vpte & (VPTE_M|VPTE_A)) != (VPTE_M|VPTE_A)) {
                                atomic_set_long(ptep, VPTE_M | VPTE_A);
                                vm_page_dirty(fs->m);
                        }
                }
                if ((fault_type & VM_PROT_READ) && (vpte & VPTE_V)) {
                        if ((vpte & VPTE_A) == 0) {
                                atomic_set_long(ptep, VPTE_A);
                                vm_page_dirty(fs->m);
                        }
                }
                lwbuf_free(lwb);
                vm_page_flag_set(fs->m, PG_REFERENCED);
                vm_page_wakeup(fs->m);
                fs->m = NULL;
                cleanup_successful_fault(fs);
        }
        /*
         * Combine remaining address bits with the vpte.
         */
        /* JG how many bits from each? */
        *pindex = ((vpte & VPTE_FRAME) >> PAGE_SHIFT) +
                  (*pindex & ((1L << vshift) - 1));
        return (KERN_SUCCESS);
}
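
/*
 * Worked example (added comment, not from the original source): if the
 * walk terminates at a level with vshift index bits remaining and the
 * terminal vpte maps frame F, the translated pindex becomes
 *
 *      (F >> PAGE_SHIFT) + (original_pindex & ((1L << vshift) - 1))
 *
 * i.e. the vpte supplies the high bits of the object-relative page
 * number and the low vshift bits pass through from the faulting address.
 * With VPTE_PS set in the master pde (vshift untouched) this degenerates
 * into a simple linear offset mapping.
 */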


/*
 * This is the core of the vm_fault code.
 *
 * Do all operations required to fault-in (fs.first_object, pindex).  Run
 * through the shadow chain as necessary and do required COW or virtual
 * copy operations.  The caller has already fully resolved the vm_map_entry
 * and, if appropriate, has created a copy-on-write layer.  All we need to
 * do is iterate the object chain.
 *
 * On failure (fs) is unlocked and deallocated and the caller may return or
 * retry depending on the failure code.  On success (fs) is NOT unlocked or
 * deallocated, fs.m will contain a resolved, busied page, and fs.object
 * will have an additional PIP count if it is not equal to fs.first_object.
 *
 * If locks based on fs->first_shared or fs->shared are insufficient,
 * clear the appropriate field(s) and return RETRY.  COWs require that
 * first_shared be 0, while page allocations (or frees) require that
 * shared be 0.  Renames require that both be 0.
 *
 * fs->first_object must be held on call.
 */
static
int
vm_fault_object(struct faultstate *fs, vm_pindex_t first_pindex,
                vm_prot_t fault_type, int allow_nofault)
{
        vm_object_t next_object;
        vm_pindex_t pindex;
        int error;

        ASSERT_LWKT_TOKEN_HELD(vm_object_token(fs->first_object));
        fs->prot = fs->first_prot;
        fs->object = fs->first_object;
        pindex = first_pindex;

        vm_object_chain_acquire(fs->first_object, fs->shared);
        vm_object_pip_add(fs->first_object, 1);

        /*
         * If a read fault occurs we try to make the page writable if
         * possible.  There are three cases where we cannot make the
         * page mapping writable:
         *
         * (1) The mapping is read-only or the VM object is read-only,
         *     fs->prot above will simply not have VM_PROT_WRITE set.
         *
         * (2) If the mapping is a virtual page table we need to be able
         *     to detect writes so we can set VPTE_M in the virtual page
         *     table.
         *
         * (3) If the VM page is read-only or copy-on-write, upgrading would
         *     just result in an unnecessary COW fault.
         *
         * VM_PROT_VPAGED is set if faulting via a virtual page table and
         * causes adjustments to the 'M'odify bit to also turn off write
         * access to force a re-fault.
         */
        if (fs->entry->maptype == VM_MAPTYPE_VPAGETABLE) {
                if ((fault_type & VM_PROT_WRITE) == 0)
                        fs->prot &= ~VM_PROT_WRITE;
        }

        if (curthread->td_lwp && curthread->td_lwp->lwp_vmspace &&
            pmap_emulate_ad_bits(&curthread->td_lwp->lwp_vmspace->vm_pmap)) {
                if ((fault_type & VM_PROT_WRITE) == 0)
                        fs->prot &= ~VM_PROT_WRITE;
        }

        /* vm_object_hold(fs->object); implied b/c object == first_object */

        for (;;) {
                /*
                 * The entire backing chain from first_object to object
                 * inclusive is chainlocked.
                 *
                 * If the object is dead, we stop here
                 */
                if (fs->object->flags & OBJ_DEAD) {
                        vm_object_pip_wakeup(fs->first_object);
                        vm_object_chain_release_all(fs->first_object,
                                                    fs->object);
                        if (fs->object != fs->first_object)
                                vm_object_drop(fs->object);
                        unlock_and_deallocate(fs);
                        return (KERN_PROTECTION_FAILURE);
                }

                /*
                 * See if the page is resident.  Wait/Retry if the page is
                 * busy (lots of stuff may have changed so we can't continue
                 * in that case).
                 *
                 * We can theoretically allow the soft-busy case on a read
                 * fault if the page is marked valid, but since such
                 * pages are typically already pmap'd, putting that
                 * special case in might be more effort than it is
                 * worth.  We cannot under any circumstances mess
                 * around with a vm_page_t->busy page except, perhaps,
                 * to pmap it.
                 */
1346                 fs->m = vm_page_lookup_busy_try(fs->object, pindex,
1347                                                 TRUE, &error);
1348                 if (error) {
1349                         vm_object_pip_wakeup(fs->first_object);
1350                         vm_object_chain_release_all(fs->first_object,
1351                                                     fs->object);
1352                         if (fs->object != fs->first_object)
1353                                 vm_object_drop(fs->object);
1354                         unlock_things(fs);
1355                         vm_page_sleep_busy(fs->m, TRUE, "vmpfw");
1356                         mycpu->gd_cnt.v_intrans++;
1357                         /*vm_object_deallocate(fs->first_object);*/
1358                         /*fs->first_object = NULL;*/
1359                         fs->m = NULL;
1360                         return (KERN_TRY_AGAIN);
1361                 }
1362                 if (fs->m) {
1363                         /*
1364                          * The page is busied for us.
1365                          *
1366                          * If reactivating a page from PQ_CACHE we may have
1367                          * to rate-limit.
1368                          */
1369                         int queue = fs->m->queue;
1370                         vm_page_unqueue_nowakeup(fs->m);
1371
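                             /*
                              * (queue - fs->m->pc) recovers the base
                              * queue type; the page queues are
                              * replicated per page color.
                              */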
1372                         if ((queue - fs->m->pc) == PQ_CACHE && 
1373                             vm_page_count_severe()) {
1374                                 vm_page_activate(fs->m);
1375                                 vm_page_wakeup(fs->m);
1376                                 fs->m = NULL;
1377                                 vm_object_pip_wakeup(fs->first_object);
1378                                 vm_object_chain_release_all(fs->first_object,
1379                                                             fs->object);
1380                                 if (fs->object != fs->first_object)
1381                                         vm_object_drop(fs->object);
1382                                 unlock_and_deallocate(fs);
1383                                 if (allow_nofault == 0 ||
1384                                     (curthread->td_flags & TDF_NOFAULT) == 0) {
1385                                         vm_wait_pfault();
1386                                 }
1387                                 return (KERN_TRY_AGAIN);
1388                         }
1389
1390                         /*
1391                          * If it still isn't completely valid (readable),
1392                          * or if a read-ahead-mark is set on the VM page,
1393                          * jump to readrest, else we found the page and
1394                          * can return.
1395                          *
1396                          * We can release the spl once we have marked the
1397                          * page busy.
1398                          */
1399                         if (fs->m->object != &kernel_object) {
1400                                 if ((fs->m->valid & VM_PAGE_BITS_ALL) !=
1401                                     VM_PAGE_BITS_ALL) {
1402                                         goto readrest;
1403                                 }
1404                                 if (fs->m->flags & PG_RAM) {
1405                                         if (debug_cluster)
1406                                                 kprintf("R");
1407                                         vm_page_flag_clear(fs->m, PG_RAM);
1408                                         goto readrest;
1409                                 }
1410                         }
1411                         break; /* break to PAGE HAS BEEN FOUND */
1412                 }
1413
1414                 /*
1415                  * Page is not resident.  If this is the search termination
1416                  * point or the pager might contain the page, allocate a new page.
1417                  */
1418                 if (TRYPAGER(fs) || fs->object == fs->first_object) {
1419                         /*
1420                          * Allocating, must be exclusive.
1421                          */
1422                         if (fs->object == fs->first_object &&
1423                             fs->first_shared) {
1424                                 fs->first_shared = 0;
1425                                 vm_object_pip_wakeup(fs->first_object);
1426                                 vm_object_chain_release_all(fs->first_object,
1427                                                             fs->object);
1428                                 if (fs->object != fs->first_object)
1429                                         vm_object_drop(fs->object);
1430                                 unlock_and_deallocate(fs);
1431                                 return (KERN_TRY_AGAIN);
1432                         }
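                             /*
                              * Similarly, if a deeper object in the
                              * backing chain was entered shared, retry
                              * with exclusive locks.
                              */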
1433                         if (fs->object != fs->first_object &&
1434                             fs->shared) {
1435                                 fs->first_shared = 0;
1436                                 fs->shared = 0;
1437                                 vm_object_pip_wakeup(fs->first_object);
1438                                 vm_object_chain_release_all(fs->first_object,
1439                                                             fs->object);
1440                                 if (fs->object != fs->first_object)
1441                                         vm_object_drop(fs->object);
1442                                 unlock_and_deallocate(fs);
1443                                 return (KERN_TRY_AGAIN);
1444                         }
1445
1446                         /*
1447                          * If the page is beyond the object size we fail
1448                          */
1449                         if (pindex >= fs->object->size) {
1450                                 vm_object_pip_wakeup(fs->first_object);
1451                                 vm_object_chain_release_all(fs->first_object,
1452                                                             fs->object);
1453                                 if (fs->object != fs->first_object)
1454                                         vm_object_drop(fs->object);
1455                                 unlock_and_deallocate(fs);
1456                                 return (KERN_PROTECTION_FAILURE);
1457                         }
1458
1459                         /*
1460                          * Allocate a new page for this object/offset pair.
1461                          *
1462                          * It is possible for the allocation to race, so
1463                          * handle the case.
1464                          */
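                             /*
                              * Pre-zeroed pages are requested only when
                              * there is no vnode or backing object,
                              * i.e. when the page would be zero-filled
                              * rather than paged in.
                              */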
1465                         fs->m = NULL;
1466                         if (!vm_page_count_severe()) {
1467                                 fs->m = vm_page_alloc(fs->object, pindex,
1468                                     ((fs->vp || fs->object->backing_object) ?
1469                                         VM_ALLOC_NULL_OK | VM_ALLOC_NORMAL :
1470                                         VM_ALLOC_NULL_OK | VM_ALLOC_NORMAL |
1471                                         VM_ALLOC_USE_GD | VM_ALLOC_ZERO));
1472                         }
1473                         if (fs->m == NULL) {
1474                                 vm_object_pip_wakeup(fs->first_object);
1475                                 vm_object_chain_release_all(fs->first_object,
1476                                                             fs->object);
1477                                 if (fs->object != fs->first_object)
1478                                         vm_object_drop(fs->object);
1479                                 unlock_and_deallocate(fs);
1480                                 if (allow_nofault == 0 ||
1481                                     (curthread->td_flags & TDF_NOFAULT) == 0) {
1482                                         vm_wait_pfault();
1483                                 }
1484                                 return (KERN_TRY_AGAIN);
1485                         }
1486
1487                         /*
1488                          * Fall through to readrest.  We have a new page which
1489                          * will have to be paged (since m->valid will be 0).
1490                          */
1491                 }
1492
1493 readrest:
1494                 /*
1495                  * We have found an invalid or partially valid page, a
1496                  * page with a read-ahead mark which might be partially or
1497                  * fully valid (and maybe dirty too), or we have allocated
1498                  * a new page.
1499                  *
1500                  * Attempt to fault-in the page if there is a chance that the
1501                  * pager has it, and potentially fault in additional pages
1502                  * at the same time.
1503                  *
1504                  * If TRYPAGER is true then fs.m will be non-NULL and busied
1505                  * for us.
1506                  */
1507                 if (TRYPAGER(fs)) {
1508                         int rv;
1509                         int seqaccess;
1510                         u_char behavior = vm_map_entry_behavior(fs->entry);
1511
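                             /*
                              * A seqaccess of 0 marks the access pattern
                              * as random, discouraging read-ahead; -1
                              * leaves the heuristic to the pager.
                              */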
1512                         if (behavior == MAP_ENTRY_BEHAV_RANDOM)
1513                                 seqaccess = 0;
1514                         else
1515                                 seqaccess = -1;
1516
1517                         /*
1518                          * Doing I/O may synchronously insert additional
1519                          * pages so we can't be shared at this point either.
1520                          *
1521                          * NOTE: We can't free fs->m here in the allocated
1522                          *       case (fs->object != fs->first_object) as
1523                          *       this would require an exclusively locked
1524                          *       VM object.
1525                          */
1526                         if (fs->object == fs->first_object &&
1527                             fs->first_shared) {
1528                                 vm_page_deactivate(fs->m);
1529                                 vm_page_wakeup(fs->m);
1530                                 fs->m = NULL;
1531                                 fs->first_shared = 0;
1532                                 vm_object_pip_wakeup(fs->first_object);
1533                                 vm_object_chain_release_all(fs->first_object,
1534                                                             fs->object);
1535                                 if (fs->object != fs->first_object)
1536                                         vm_object_drop(fs->object);
1537                                 unlock_and_deallocate(fs);
1538                                 return (KERN_TRY_AGAIN);
1539                         }
1540                         if (fs->object != fs->first_object &&
1541                             fs->shared) {
1542                                 vm_page_deactivate(fs->m);
1543                                 vm_page_wakeup(fs->m);
1544                                 fs->m = NULL;
1545                                 fs->first_shared = 0;
1546                                 fs->shared = 0;
1547                                 vm_object_pip_wakeup(fs->first_object);
1548                                 vm_object_chain_release_all(fs->first_object,
1549                                                             fs->object);
1550                                 if (fs->object != fs->first_object)
1551                                         vm_object_drop(fs->object);
1552                                 unlock_and_deallocate(fs);
1553                                 return (KERN_TRY_AGAIN);
1554                         }
1555
1556                         /*
1557                          * Avoid deadlocking against the map when doing I/O.
1558                          * fs.object is held and the page is PG_BUSY'd.
1559                          *
1560                          * NOTE: Once unlocked, fs->entry can become stale
1561                          *       so this will NULL it out.
1562                          *
1563                          * NOTE: fs->entry is invalid until we relock the
1564                          *       map and verify that the timestamp has not
1565                          *       changed.
1566                          */
1567                         unlock_map(fs);
1568
1569                         /*
1570                          * Acquire the page data.  We still hold a ref on
1571                          * fs.object and the page has been PG_BUSY'd.
1572                          *
1573                          * The pager may replace the page (for example, in
1574                          * order to enter a fictitious page into the
1575                          * object).  If it does so it is responsible for
1576                          * cleaning up the passed page and properly setting
1577                          * the new page PG_BUSY.
1578                          *
1579                          * If we got here through a PG_RAM read-ahead
1580                          * mark, the page may be partially dirty and thus
1581                          * not freeable.  Don't bother checking to see
1582                          * if the pager has the page because we can't free
1583                          * it anyway.  We have to depend on the get_page
1584                          * operation filling in any gaps whether there is
1585                          * backing store or not.
1586                          */
1587                         rv = vm_pager_get_page(fs->object, &fs->m, seqaccess);
1588
1589                         if (rv == VM_PAGER_OK) {
1590                                 /*
1591                                  * Relookup in case pager changed page. Pager
1592                                  * is responsible for disposition of old page
1593                                  * if moved.
1594                                  *
1595                                  * XXX other code segments do relookups too.
1596                                  * It's a bad abstraction that needs to be
1597                                  * fixed/removed.
1598                                  */
1599                                 fs->m = vm_page_lookup(fs->object, pindex);
1600                                 if (fs->m == NULL) {
1601                                         vm_object_pip_wakeup(fs->first_object);
1602                                         vm_object_chain_release_all(
1603                                                 fs->first_object, fs->object);
1604                                         if (fs->object != fs->first_object)
1605                                                 vm_object_drop(fs->object);
1606                                         unlock_and_deallocate(fs);
1607                                         return (KERN_TRY_AGAIN);
1608                                 }
1609                                 ++fs->hardfault;
1610                                 break; /* break to PAGE HAS BEEN FOUND */
1611                         }
1612
1613                         /*
1614                          * Remove the bogus page (which does not exist at this
1615                          * object/offset); before doing so, we must get back
1616                          * our object lock to preserve our invariant.
1617                          *
1618                          * Also wake up any other process that may want to bring
1619                          * in this page.
1620                          *
1621                          * If this is the top-level object, we must leave the
1622                          * busy page to prevent another process from rushing
1623                          * past us, and inserting the page in that object at
1624                          * the same time that we are.
1625                          */
1626                         if (rv == VM_PAGER_ERROR) {
1627                                 if (curproc) {
1628                                         kprintf("vm_fault: pager read error, "
1629                                                 "pid %d (%s)\n",
1630                                                 curproc->p_pid,
1631                                                 curproc->p_comm);
1632                                 } else {
1633                                         kprintf("vm_fault: pager read error, "
1634                                                 "thread %p (%s)\n",
1635                                                 curthread,
1636                                                 curthread->td_comm);
1637                                 }
1638                         }
1639
1640                         /*
1641                          * Data outside the range of the pager or an I/O error
1642                          *
1643                          * The page may have been wired during the pagein,
1644                          * e.g. by the buffer cache, and cannot simply be
1645                          * freed.  Call vnode_pager_freepage() to deal with it.
1646                          *
1647                          * Also note that we cannot free the page if we are
1648                          * holding the related object shared. XXX not sure
1649                          * what to do in that case.
1650                          */
1651                         if (fs->object != fs->first_object) {
1652                                 vnode_pager_freepage(fs->m);
1653                                 fs->m = NULL;
1654                                 /*
1655                                  * XXX - we cannot just fall out at this
1656                                  * point, m has been freed and is invalid!
1657                                  */
1658                         }
1659                         /*
1660                          * XXX - the check for kernel_map is a kludge to work
1661                          * around having the machine panic on a kernel space
1662                          * fault w/ I/O error.
1663                          */
1664                         if (((fs->map != &kernel_map) &&
1665                             (rv == VM_PAGER_ERROR)) || (rv == VM_PAGER_BAD)) {
1666                                 if (fs->m) {
1667                                         if (fs->first_shared) {
1668                                                 vm_page_deactivate(fs->m);
1669                                                 vm_page_wakeup(fs->m);
1670                                         } else {
1671                                                 vnode_pager_freepage(fs->m);
1672                                         }
1673                                         fs->m = NULL;
1674                                 }
1675                                 vm_object_pip_wakeup(fs->first_object);
1676                                 vm_object_chain_release_all(fs->first_object,
1677                                                             fs->object);
1678                                 if (fs->object != fs->first_object)
1679                                         vm_object_drop(fs->object);
1680                                 unlock_and_deallocate(fs);
1681                                 if (rv == VM_PAGER_ERROR)
1682                                         return (KERN_FAILURE);
1683                                 else
1684                                         return (KERN_PROTECTION_FAILURE);
1685                                 /* NOT REACHED */
1686                         }
1687                 }
1688
1689                 /*
1690                  * We get here if the object has a default pager (or unwiring) 
1691                  * or the pager doesn't have the page.
1692                  *
1693                  * fs->first_m will be used for the COW unless we find a
1694                  * deeper page to be mapped read-only, in which case the
1695                  * unlock*(fs) will free first_m.
1696                  */
1697                 if (fs->object == fs->first_object)
1698                         fs->first_m = fs->m;
1699
1700                 /*
1701                  * Move on to the next object.  The chain lock should prevent
1702                  * the backing_object from getting ripped out from under us.
1703                  *
1704                  * The object lock for the next object is governed by
1705                  * fs->shared.
1706                  */
1707                 if ((next_object = fs->object->backing_object) != NULL) {
1708                         if (fs->shared)
1709                                 vm_object_hold_shared(next_object);
1710                         else
1711                                 vm_object_hold(next_object);
1712                         vm_object_chain_acquire(next_object, fs->shared);
1713                         KKASSERT(next_object == fs->object->backing_object);
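                             /*
                              * Translate the index into the backing
                              * object's address space.
                              */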
1714                         pindex += OFF_TO_IDX(fs->object->backing_object_offset);
1715                 }
1716
1717                 if (next_object == NULL) {
1718                         /*
1719                          * If there's no object left, fill the page in the top
1720                          * object with zeros.
1721                          */
1722                         if (fs->object != fs->first_object) {
1723 #if 0
1724                                 if (fs->first_object->backing_object !=
1725                                     fs->object) {
1726                                         vm_object_hold(fs->first_object->backing_object);
1727                                 }
1728 #endif
1729                                 vm_object_chain_release_all(
1730                                         fs->first_object->backing_object,
1731                                         fs->object);
1732 #if 0
1733                                 if (fs->first_object->backing_object !=
1734                                     fs->object) {
1735                                         vm_object_drop(fs->first_object->backing_object);
1736                                 }
1737 #endif
1738                                 vm_object_pip_wakeup(fs->object);
1739                                 vm_object_drop(fs->object);
1740                                 fs->object = fs->first_object;
1741                                 pindex = first_pindex;
1742                                 fs->m = fs->first_m;
1743                         }
1744                         fs->first_m = NULL;
1745
1746                         /*
1747                          * Zero the page if necessary and mark it valid.
1748                          */
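                             /*
                              * PG_ZERO means the page was already zeroed
                              * at allocation, so the fill can be skipped
                              * (counted as an optimized zero-fill).
                              */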
1749                         if ((fs->m->flags & PG_ZERO) == 0) {
1750                                 vm_page_zero_fill(fs->m);
1751                         } else {
1752 #ifdef PMAP_DEBUG
1753                                 pmap_page_assertzero(VM_PAGE_TO_PHYS(fs->m));
1754 #endif
1755                                 vm_page_flag_clear(fs->m, PG_ZERO);
1756                                 mycpu->gd_cnt.v_ozfod++;
1757                         }
1758                         mycpu->gd_cnt.v_zfod++;
1759                         fs->m->valid = VM_PAGE_BITS_ALL;
1760                         break;  /* break to PAGE HAS BEEN FOUND */
1761                 }
1762                 if (fs->object != fs->first_object) {
1763                         vm_object_pip_wakeup(fs->object);
1764                         vm_object_lock_swap();
1765                         vm_object_drop(fs->object);
1766                 }
1767                 KASSERT(fs->object != next_object,
1768                         ("object loop %p", next_object));
1769                 fs->object = next_object;
1770                 vm_object_pip_add(fs->object, 1);
1771         }
1772
1773         /*
1774          * PAGE HAS BEEN FOUND. [Loop invariant still holds -- the object lock
1775          * is held.]
1776          *
1777          * object still held.
1778          *
1779          * local shared variable may be different from fs->shared.
1780          *
1781          * If the page is being written, but isn't already owned by the
1782          * top-level object, we have to copy it into a new page owned by the
1783          * top-level object.
1784          */
1785         KASSERT((fs->m->flags & PG_BUSY) != 0,
1786                 ("vm_fault: not busy after main loop"));
1787
1788         if (fs->object != fs->first_object) {
1789                 /*
1790                  * We only really need to copy if we want to write it.
1791                  */
1792                 if (fault_type & VM_PROT_WRITE) {
1793                         /*
1794                          * This allows pages to be virtually copied from a 
1795                          * backing_object into the first_object, where the 
1796                          * backing object has no other refs to it, and cannot
1797                          * gain any more refs.  Instead of a bcopy, we just 
1798                          * move the page from the backing object to the 
1799                          * first object.  Note that we must mark the page 
1800                          * dirty in the first object so that it will go out 
1801                          * to swap when needed.
1802                          */
1803                         if (
1804                                 /*
1805                                  * Must be holding exclusive locks
1806                                  */
1807                                 fs->first_shared == 0 &&
1808                                 fs->shared == 0 &&
1809                                 /*
1810                                  * Map, if present, has not changed
1811                                  */
1812                                 (fs->map == NULL ||
1813                                 fs->map_generation == fs->map->timestamp) &&
1814                                 /*
1815                                  * Only one shadow object
1816                                  */
1817                                 (fs->object->shadow_count == 1) &&
1818                                 /*
1819                                  * No COW refs, except us
1820                                  */
1821                                 (fs->object->ref_count == 1) &&
1822                                 /*
1823                                  * No one else can look this object up
1824                                  */
1825                                 (fs->object->handle == NULL) &&
1826                                 /*
1827                                  * No other ways to look the object up
1828                                  */
1829                                 ((fs->object->type == OBJT_DEFAULT) ||
1830                                  (fs->object->type == OBJT_SWAP)) &&
1831                                 /*
1832                                  * We don't chase down the shadow chain
1833                                  */
1834                                 (fs->object == fs->first_object->backing_object) &&
1835
1836                                 /*
1837                                  * grab the lock if we need to
1838                                  */
1839                                 (fs->lookup_still_valid ||
1840                                  fs->map == NULL ||
1841                                  lockmgr(&fs->map->lock, LK_EXCLUSIVE|LK_NOWAIT) == 0)
1842                             ) {
1843                                 /*
1844                                  * (first_m) and (m) are both busied.  We have to
1845                                  * move (m) into (first_m)'s object/pindex
1846                                  * in an atomic fashion, then free (first_m).
1847                                  *
1848                                  * first_object is held so the remove
1849                                  * followed by the rename should wind
1850                                  * up being atomic.  vm_page_free() might
1851                                  * block so we don't do it until after the
1852                                  * rename.
1853                                  */
1854                                 fs->lookup_still_valid = 1;
1855                                 vm_page_protect(fs->first_m, VM_PROT_NONE);
1856                                 vm_page_remove(fs->first_m);
1857                                 vm_page_rename(fs->m, fs->first_object,
1858                                                first_pindex);
1859                                 vm_page_free(fs->first_m);
1860                                 fs->first_m = fs->m;
1861                                 fs->m = NULL;
1862                                 mycpu->gd_cnt.v_cow_optim++;
1863                         } else {
1864                                 /*
1865                                  * Oh, well, let's copy it.
1866                                  *
1867                                  * Why are we unmapping the original page
1868                                  * here?  Well, in short, not all accessors
1869                                  * of user memory go through the pmap.  The
1870                                  * procfs code doesn't access user memory
1871                                  * via a local pmap, so vm_fault_page*()
1872                                  * can't call pmap_enter().  And the umtx*()
1873                                  * code may modify the COW'd page via a DMAP
1874                                  * or kernel mapping and not via the pmap,
1875                                  * leaving the original page still mapped
1876                                  * read-only into the pmap.
1877                                  *
1878                                  * So we have to remove the page from at
1879                                  * least the current pmap if it is in it.
1880                                  * Just remove it from all pmaps.
1881                                  */
1882                                 KKASSERT(fs->first_shared == 0);
1883                                 vm_page_copy(fs->m, fs->first_m);
1884                                 vm_page_protect(fs->m, VM_PROT_NONE);
1885                                 vm_page_event(fs->m, VMEVENT_COW);
1886                         }
1887
1888                         /*
1889                          * We no longer need the old page or object.
1890                          */
1891                         if (fs->m)
1892                                 release_page(fs);
1893
1894                         /*
1895                          * We intend to revert to first_object, undo the
1896                          * chain lock through to that.
1897                          */
1898 #if 0
1899                         if (fs->first_object->backing_object != fs->object)
1900                                 vm_object_hold(fs->first_object->backing_object);
1901 #endif
1902                         vm_object_chain_release_all(
1903                                         fs->first_object->backing_object,
1904                                         fs->object);
1905 #if 0
1906                         if (fs->first_object->backing_object != fs->object)
1907                                 vm_object_drop(fs->first_object->backing_object);
1908 #endif
1909
1910                         /*
1911                          * fs->object != fs->first_object due to above 
1912                          * conditional
1913                          */
1914                         vm_object_pip_wakeup(fs->object);
1915                         vm_object_drop(fs->object);
1916
1917                         /*
1918                          * Only use the new page below...
1919                          */
1920                         mycpu->gd_cnt.v_cow_faults++;
1921                         fs->m = fs->first_m;
1922                         fs->object = fs->first_object;
1923                         pindex = first_pindex;
1924                 } else {
1925                         /*
1926                          * If it wasn't a write fault avoid having to copy
1927                          * the page by mapping it read-only.
1928                          */
1929                         fs->prot &= ~VM_PROT_WRITE;
1930                 }
1931         }
1932
1933         /*
1934          * Relock the map if necessary, then check the generation count.
1935          * The map's current timestamp is compared against the
1936          * fs->map_generation recorded at lookup time.
1937          *
1938          * If the count has changed after relocking then all sorts of
1939          * crap may have happened and we have to retry.
1940          *
1941          * NOTE: The relock_map() can fail due to a deadlock against
1942          *       the vm_page we are holding BUSY.
1943          */
1944         if (fs->lookup_still_valid == FALSE && fs->map) {
1945                 if (relock_map(fs) ||
1946                     fs->map->timestamp != fs->map_generation) {
1947                         release_page(fs);
1948                         vm_object_pip_wakeup(fs->first_object);
1949                         vm_object_chain_release_all(fs->first_object,
1950                                                     fs->object);
1951                         if (fs->object != fs->first_object)
1952                                 vm_object_drop(fs->object);
1953                         unlock_and_deallocate(fs);
1954                         return (KERN_TRY_AGAIN);
1955                 }
1956         }
1957
1958         /*
1959          * If the fault is a write, we know that this page is being
1960          * written NOW so dirty it explicitly to save on pmap_is_modified()
1961          * calls later.
1962          *
1963          * If this is a NOSYNC mmap we do not want to set PG_NOSYNC
1964          * if the page is already dirty to prevent data written with
1965          * the expectation of being synced from not being synced.
1966          * Likewise if this entry does not request NOSYNC then make
1967          * sure the page isn't marked NOSYNC.  Applications sharing
1968          * data should use the same flags to avoid ping ponging.
1969          *
1970          * Also tell the backing pager, if any, that it should remove
1971          * any swap backing since the page is now dirty.
1972          */
1973         vm_page_activate(fs->m);
1974         if (fs->prot & VM_PROT_WRITE) {
1975                 vm_object_set_writeable_dirty(fs->m->object);
1976                 vm_set_nosync(fs->m, fs->entry);
1977                 if (fs->fault_flags & VM_FAULT_DIRTY) {
1978                         vm_page_dirty(fs->m);
1979                         swap_pager_unswapped(fs->m);
1980                 }
1981         }
1982
1983         vm_object_pip_wakeup(fs->first_object);
1984         vm_object_chain_release_all(fs->first_object, fs->object);
1985         if (fs->object != fs->first_object)
1986                 vm_object_drop(fs->object);
1987
1988         /*
1989          * Page had better still be busy.  We are still locked up and 
1990          * fs->object will have another PIP reference if it is not equal
1991          * to fs->first_object.
1992          */
1993         KASSERT(fs->m->flags & PG_BUSY,
1994                 ("vm_fault: page %p not busy!", fs->m));
1995
1996         /*
1997          * Sanity check: page must be completely valid or it is not fit to
1998          * map into user space.  vm_pager_get_page() ensures this.
1999          */
2000         if (fs->m->valid != VM_PAGE_BITS_ALL) {
2001                 vm_page_zero_invalid(fs->m, TRUE);
2002                 kprintf("Warning: page %p partially invalid on fault\n", fs->m);
2003         }
2004         vm_page_flag_clear(fs->m, PG_ZERO);
2005
2006         return (KERN_SUCCESS);
2007 }
2008
2009 /*
2010  * Hold each of the physical pages that are mapped by the specified range of
2011  * virtual addresses, ["addr", "addr" + "len"), if those mappings are valid
2012  * and allow the specified types of access, "prot".  If all of the implied
2013  * pages are successfully held, then the number of held pages is returned
2014  * together with pointers to those pages in the array "ma".  However, if any
2015  * of the pages cannot be held, -1 is returned.
2016  */
2017 int
2018 vm_fault_quick_hold_pages(vm_map_t map, vm_offset_t addr, vm_size_t len,
2019     vm_prot_t prot, vm_page_t *ma, int max_count)
2020 {
2021         vm_offset_t start, end;
2022         int i, npages, error;
2023
2024         start = trunc_page(addr);
2025         end = round_page(addr + len);
2026
2027         npages = howmany(end - start, PAGE_SIZE);
2028
2029         if (npages > max_count)
2030                 return -1;
2031
2032         for (i = 0; i < npages; i++) {
2033                 ma[i] = vm_fault_page_quick(start + (i * PAGE_SIZE),
2034                                             prot, &error);
2035                 if (ma[i] == NULL) {
2036                         /* unhold anything already held and fail */
2037                         while (--i >= 0)
                                     vm_page_unhold(ma[i]);
                             return -1;
                     }
             }
2038
2039         return npages;
2040 }
2041
2042 /*
2043  * Wire down a range of virtual addresses in a map.  The entry in question
2044  * should be marked in-transition and the map must be locked.  We must
2045  * release the map temporarily while faulting-in the page to avoid a
2046  * deadlock.  Note that the entry may be clipped while we are blocked but
2047  * will never be freed.
2048  *
2049  * No requirements.
2050  */
2051 int
2052 vm_fault_wire(vm_map_t map, vm_map_entry_t entry,
2053               boolean_t user_wire, int kmflags)
2054 {
2055         boolean_t fictitious;
2056         vm_offset_t start;
2057         vm_offset_t end;
2058         vm_offset_t va;
2059         vm_paddr_t pa;
2060         vm_page_t m;
2061         pmap_t pmap;
2062         int rv;
2063         int wire_prot;
2064         int fault_flags;
2065
2066         lwkt_gettoken(&map->token);
2067
2068         if (user_wire) {
2069                 wire_prot = VM_PROT_READ;
2070                 fault_flags = VM_FAULT_USER_WIRE;
2071         } else {
2072                 wire_prot = VM_PROT_READ | VM_PROT_WRITE;
2073                 fault_flags = VM_FAULT_CHANGE_WIRING;
2074         }
2075         if (kmflags & KM_NOTLBSYNC)
2076                 wire_prot |= VM_PROT_NOSYNC;
2077
2078         pmap = vm_map_pmap(map);
2079         start = entry->start;
2080         end = entry->end;
2081         switch(entry->maptype) {
2082         case VM_MAPTYPE_NORMAL:
2083         case VM_MAPTYPE_VPAGETABLE:
2084                 fictitious = entry->object.vm_object &&
2085                             ((entry->object.vm_object->type == OBJT_DEVICE) ||
2086                              (entry->object.vm_object->type == OBJT_MGTDEVICE));
2087                 break;
2088         case VM_MAPTYPE_UKSMAP:
2089                 fictitious = TRUE;
2090                 break;
2091         default:
2092                 fictitious = FALSE;
2093                 break;
2094         }
2095
2096         if (entry->eflags & MAP_ENTRY_KSTACK)
2097                 start += PAGE_SIZE;
2098         map->timestamp++;
2099         vm_map_unlock(map);
2100
2101         /*
2102          * We simulate a fault to get the page and enter it in the physical
2103          * map.
2104          */
2105         for (va = start; va < end; va += PAGE_SIZE) {
2106                 rv = vm_fault(map, va, wire_prot, fault_flags);
2107                 if (rv) {
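                             /*
                              * The fault failed; back out by unwiring
                              * every page successfully wired so far.
                              */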
2108                         while (va > start) {
2109                                 va -= PAGE_SIZE;
2110                                 if ((pa = pmap_extract(pmap, va)) == 0)
2111                                         continue;
2112                                 pmap_change_wiring(pmap, va, FALSE, entry);
2113                                 if (!fictitious) {
2114                                         m = PHYS_TO_VM_PAGE(pa);
2115                                         vm_page_busy_wait(m, FALSE, "vmwrpg");
2116                                         vm_page_unwire(m, 1);
2117                                         vm_page_wakeup(m);
2118                                 }
2119                         }
2120                         goto done;
2121                 }
2122         }
2123         rv = KERN_SUCCESS;
2124 done:
2125         vm_map_lock(map);
2126         lwkt_reltoken(&map->token);
2127         return (rv);
2128 }
2129
2130 /*
2131  * Unwire a range of virtual addresses in a map.  The map should be
2132  * locked.
2133  */
2134 void
2135 vm_fault_unwire(vm_map_t map, vm_map_entry_t entry)
2136 {
2137         boolean_t fictitious;
2138         vm_offset_t start;
2139         vm_offset_t end;
2140         vm_offset_t va;
2141         vm_paddr_t pa;
2142         vm_page_t m;
2143         pmap_t pmap;
2144
2145         lwkt_gettoken(&map->token);
2146
2147         pmap = vm_map_pmap(map);
2148         start = entry->start;
2149         end = entry->end;
2150         fictitious = entry->object.vm_object &&
2151                         ((entry->object.vm_object->type == OBJT_DEVICE) ||
2152                          (entry->object.vm_object->type == OBJT_MGTDEVICE));
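             /*
              * Device-backed (fictitious) pages are not unwired
              * individually; only the pmap wiring is adjusted for
              * them below.
              */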
2153         if (entry->eflags & MAP_ENTRY_KSTACK)
2154                 start += PAGE_SIZE;
2155
2156         /*
2157          * Since the pages are wired down, we must be able to get their
2158          * mappings from the physical map system.
2159          */
2160         for (va = start; va < end; va += PAGE_SIZE) {
2161                 pa = pmap_extract(pmap, va);
2162                 if (pa != 0) {
2163                         pmap_change_wiring(pmap, va, FALSE, entry);
2164                         if (!fictitious) {
2165                                 m = PHYS_TO_VM_PAGE(pa);
2166                                 vm_page_busy_wait(m, FALSE, "vmwupg");
2167                                 vm_page_unwire(m, 1);
2168                                 vm_page_wakeup(m);
2169                         }
2170                 }
2171         }
2172         lwkt_reltoken(&map->token);
2173 }
2174
2175 /*
2176  * Copy all of the pages from a wired-down map entry to another.
2177  *
2178  * The source and destination maps must be locked for write.
2179  * The source and destination map tokens must be held.
2180  * The source map entry must be wired down (or be a sharing map
2181  * entry corresponding to a main map entry that is wired down).
2182  *
2183  * No other requirements.
2184  *
2185  * XXX do segment optimization
2186  */
2187 void
2188 vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
2189                     vm_map_entry_t dst_entry, vm_map_entry_t src_entry)
2190 {
2191         vm_object_t dst_object;
2192         vm_object_t src_object;
2193         vm_ooffset_t dst_offset;
2194         vm_ooffset_t src_offset;
2195         vm_prot_t prot;
2196         vm_offset_t vaddr;
2197         vm_page_t dst_m;
2198         vm_page_t src_m;
2199
2200         src_object = src_entry->object.vm_object;
2201         src_offset = src_entry->offset;
2202
2203         /*
2204          * Create the top-level object for the destination entry. (Doesn't
2205          * actually shadow anything - we copy the pages directly.)
2206          */
2207         vm_map_entry_allocate_object(dst_entry);
2208         dst_object = dst_entry->object.vm_object;
2209
2210         prot = dst_entry->max_protection;
2211
2212         /*
2213          * Loop through all of the pages in the entry's range, copying each
2214          * one from the source object (it should be there) to the destination
2215          * object.
2216          */
2217         vm_object_hold(src_object);
2218         vm_object_hold(dst_object);
2219         for (vaddr = dst_entry->start, dst_offset = 0;
2220             vaddr < dst_entry->end;
2221             vaddr += PAGE_SIZE, dst_offset += PAGE_SIZE) {
2222
2223                 /*
2224                  * Allocate a page in the destination object
2225                  */
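                     /*
                      * If no page is available vm_wait() sleeps until
                      * the pageout daemon frees memory, then the
                      * allocation is retried.
                      */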
2226                 do {
2227                         dst_m = vm_page_alloc(dst_object,
2228                                               OFF_TO_IDX(dst_offset),
2229                                               VM_ALLOC_NORMAL);
2230                         if (dst_m == NULL) {
2231                                 vm_wait(0);
2232                         }
2233                 } while (dst_m == NULL);
2234
2235                 /*
2236                  * Find the page in the source object, and copy it in.
2237                  * (Because the source is wired down, the page will be in
2238                  * memory.)
2239                  */
2240                 src_m = vm_page_lookup(src_object,
2241                                        OFF_TO_IDX(dst_offset + src_offset));
2242                 if (src_m == NULL)
2243                         panic("vm_fault_copy_entry: page missing");
2244
2245                 vm_page_copy(src_m, dst_m);
2246                 vm_page_event(src_m, VMEVENT_COW);
2247
2248                 /*
2249                  * Enter it in the pmap...
2250                  */
2251
2252                 vm_page_flag_clear(dst_m, PG_ZERO);
2253                 pmap_enter(dst_map->pmap, vaddr, dst_m, prot, FALSE, dst_entry);
2254
2255                 /*
2256                  * Mark it no longer busy, and put it on the active list.
2257                  */
2258                 vm_page_activate(dst_m);
2259                 vm_page_wakeup(dst_m);
2260         }
2261         vm_object_drop(dst_object);
2262         vm_object_drop(src_object);
2263 }
2264
2265 #if 0
2266
2267 /*
2268  * This routine checks around the requested page for other pages that
2269  * might be able to be faulted in.  This routine brackets the viable
2270  * pages for the pages to be paged in.
2271  *
2272  * Inputs:
2273  *      m, rbehind, rahead
2274  *
2275  * Outputs:
2276  *  marray (array of vm_page_t), reqpage (index of requested page)
2277  *
2278  * Return value:
2279  *  number of pages in marray
2280  */
2281 static int
2282 vm_fault_additional_pages(vm_page_t m, int rbehind, int rahead,
2283                           vm_page_t *marray, int *reqpage)
2284 {
2285         int i,j;
2286         vm_object_t object;
2287         vm_pindex_t pindex, startpindex, endpindex, tpindex;
2288         vm_page_t rtm;
2289         int cbehind, cahead;
2290
2291         object = m->object;
2292         pindex = m->pindex;
2293
2294         /*
2295          * we don't fault-ahead for device pager
2296          */
2297         if ((object->type == OBJT_DEVICE) ||
2298             (object->type == OBJT_MGTDEVICE)) {
2299                 *reqpage = 0;
2300                 marray[0] = m;
2301                 return 1;
2302         }
2303
2304         /*
2305          * if the requested page is not available, then give up now
2306          */
2307         if (!vm_pager_has_page(object, pindex, &cbehind, &cahead)) {
2308                 *reqpage = 0;   /* not used by caller, fix compiler warn */
2309                 return 0;
2310         }
2311
2312         if ((cbehind == 0) && (cahead == 0)) {
2313                 *reqpage = 0;
2314                 marray[0] = m;
2315                 return 1;
2316         }
2317
2318         if (rahead > cahead) {
2319                 rahead = cahead;
2320         }
2321
2322         if (rbehind > cbehind) {
2323                 rbehind = cbehind;
2324         }
2325
2326         /*
2327          * Do not do any readahead if we have insufficient free memory.
2328          *
2329          * XXX this code was broken/disabled before and exhibits
2330          * instability with the conditional fixed, so shortcut for now.
2331          */
2332         if (burst_fault == 0 || vm_page_count_severe()) {
2333                 marray[0] = m;
2334                 *reqpage = 0;
2335                 return 1;
2336         }
2337
2338         /*
2339          * scan backward for the read-behind pages -- in memory
2340          *
2341          * Assume that if the page is not found an interrupt will not
2342          * create it.  Theoretically interrupts can only remove (busy)
2343          * pages, not create new associations.
2344          */
2345         if (pindex > 0) {
2346                 if (rbehind > pindex) {
2347                         rbehind = pindex;
2348                         startpindex = 0;
2349                 } else {
2350                         startpindex = pindex - rbehind;
2351                 }
2352
2353                 vm_object_hold(object);
2354                 for (tpindex = pindex; tpindex > startpindex; --tpindex) {
2355                         if (vm_page_lookup(object, tpindex - 1))
2356                                 break;
2357                 }
2358
2359                 i = 0;
2360                 while (tpindex < pindex) {
2361                         rtm = vm_page_alloc(object, tpindex, VM_ALLOC_SYSTEM |
2362                                                              VM_ALLOC_NULL_OK);
2363                         if (rtm == NULL) {
2364                                 for (j = 0; j < i; j++) {
2365                                         vm_page_free(marray[j]);
2366                                 }
2367                                 vm_object_drop(object);
2368                                 marray[0] = m;
2369                                 *reqpage = 0;
2370                                 return 1;
2371                         }
2372                         marray[i] = rtm;
2373                         ++i;
2374                         ++tpindex;
2375                 }
2376                 vm_object_drop(object);
2377         } else {
2378                 i = 0;
2379         }
2380
2381         /*
2382          * Assign requested page
2383          */
2384         marray[i] = m;
2385         *reqpage = i;
2386         ++i;
2387
2388         /*
2389          * Scan forwards for read-ahead pages
2390          */
2391         tpindex = pindex + 1;
2392         endpindex = tpindex + rahead;
2393         if (endpindex > object->size)
2394                 endpindex = object->size;
2395
2396         vm_object_hold(object);
2397         while (tpindex < endpindex) {
2398                 if (vm_page_lookup(object, tpindex))
2399                         break;
2400                 rtm = vm_page_alloc(object, tpindex, VM_ALLOC_SYSTEM |
2401                                                      VM_ALLOC_NULL_OK);
2402                 if (rtm == NULL)
2403                         break;
2404                 marray[i] = rtm;
2405                 ++i;
2406                 ++tpindex;
2407         }
2408         vm_object_drop(object);
2409
2410         return (i);
2411 }
2412
2413 #endif
2414
2415 /*
2416  * vm_prefault() provides a quick way of clustering pagefaults into a
2417  * process's address space.  It is a "cousin" of pmap_object_init_pt,
2418  * except it runs at page fault time instead of mmap time.
2419  *
2420  * vm.fast_fault        Enables pre-faulting zero-fill pages
2421  *
2422  * vm.prefault_pages    Number of pages (1/2 negative, 1/2 positive) to
2423  *                      prefault.  Scan stops in either direction when
2424  *                      a page is found to already exist.
2425  *
2426  * This code used to be per-platform pmap_prefault().  It is now
2427  * machine-independent and enhanced to also pre-fault zero-fill pages
2428  * (see vm.fast_fault) as well as make them writable, which greatly
2429  * reduces the number of page faults programs incur.
2430  *
2431  * Application performance when pre-faulting zero-fill pages is heavily
2432  * dependent on the application.  Very tiny applications like /bin/echo
2433  * lose a little performance while applications of any appreciable size
2434  * gain performance.  Prefaulting multiple pages also reduces SMP
2435  * congestion and can improve SMP performance significantly.
2436  *
2437  * NOTE!  prot may allow writing but this only applies to the top level
2438  *        object.  If we wind up mapping a page extracted from a backing
2439  *        object we have to make sure it is read-only.
2440  *
2441  * NOTE!  The caller has already handled any COW operations on the
2442  *        vm_map_entry via the normal fault code.  Do NOT call this
2443  *        shortcut unless the normal fault code has run on this entry.
2444  *
2445  * The related map must be locked.
2446  * No other requirements.
2447  */
2448 static int vm_prefault_pages = 8;
2449 SYSCTL_INT(_vm, OID_AUTO, prefault_pages, CTLFLAG_RW, &vm_prefault_pages, 0,
2450            "Maximum number of pages to pre-fault");
2451 static int vm_fast_fault = 1;
2452 SYSCTL_INT(_vm, OID_AUTO, fast_fault, CTLFLAG_RW, &vm_fast_fault, 0,
2453            "Burst fault zero-fill regions");
2454
2455 /*
2456  * Set PG_NOSYNC if the map entry indicates so, but only if the page
2457  * is not already dirty by other means.  This will prevent passive
2458  * filesystem syncing as well as 'sync' from writing out the page.
2459  */
2460 static void
2461 vm_set_nosync(vm_page_t m, vm_map_entry_t entry)
2462 {
2463         if (entry->eflags & MAP_ENTRY_NOSYNC) {
2464                 if (m->dirty == 0)
2465                         vm_page_flag_set(m, PG_NOSYNC);
2466         } else {
2467                 vm_page_flag_clear(m, PG_NOSYNC);
2468         }
2469 }
2470
2471 static void
2472 vm_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry, int prot,
2473             int fault_flags)
2474 {
2475         struct lwp *lp;
2476         vm_page_t m;
2477         vm_offset_t addr;
2478         vm_pindex_t index;
2479         vm_pindex_t pindex;
2480         vm_object_t object;
2481         int pprot;
2482         int i;
2483         int noneg;
2484         int nopos;
2485         int maxpages;
2486
2487         /*
2488          * Get stable max count value, disabled if set to 0
2489          */
2490         maxpages = vm_prefault_pages;
2491         cpu_ccfence();
2492         if (maxpages <= 0)
2493                 return;
2494
2495         /*
2496          * We do not currently prefault mappings that use virtual page
2497          * tables.  We do not prefault foreign pmaps.
2498          */
2499         if (entry->maptype != VM_MAPTYPE_NORMAL)
2500                 return;
2501         lp = curthread->td_lwp;
2502         if (lp == NULL || (pmap != vmspace_pmap(lp->lwp_vmspace)))
2503                 return;
2504
2505         /*
2506          * Limit pre-fault count to 1024 pages.
2507          */
2508         if (maxpages > 1024)
2509                 maxpages = 1024;
2510
2511         object = entry->object.vm_object;
2512         KKASSERT(object != NULL);
2513         KKASSERT(object == entry->object.vm_object);
2514         vm_object_hold(object);
2515         vm_object_chain_acquire(object, 0);
2516
2517         noneg = 0;
2518         nopos = 0;
2519         for (i = 0; i < maxpages; ++i) {
2520                 vm_object_t lobject;
2521                 vm_object_t nobject;
2522                 int allocated = 0;
2523                 int error;
2524
2525                 /*
2526                  * This can eat a lot of time on a heavily contended
2527                  * machine so yield on the tick if needed.
2528                  */
2529                 if ((i & 7) == 7)
2530                         lwkt_yield();
2531
2532                 /*
2533                  * Calculate the page to pre-fault, stopping the scan in
2534                  * each direction separately if the limit is reached.
2535                  */
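                     /*
                      * The scan alternates outward from addra:
                      * i=0 -> +1 page, i=1 -> -1 page, i=2 -> +2 pages,
                      * i=3 -> -2 pages, and so on.
                      */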
2536                 if (i & 1) {
2537                         if (noneg)
2538                                 continue;
2539                         addr = addra - ((i + 1) >> 1) * PAGE_SIZE;
2540                 } else {
2541                         if (nopos)
2542                                 continue;
2543                         addr = addra + ((i + 2) >> 1) * PAGE_SIZE;
2544                 }
                if (addr < entry->start) {
                        noneg = 1;
                        if (noneg && nopos)
                                break;
                        continue;
                }
                if (addr >= entry->end) {
                        nopos = 1;
                        if (noneg && nopos)
                                break;
                        continue;
                }

                /*
                 * Skip pages already mapped, and stop scanning in that
                 * direction.  When the scan terminates in both directions
                 * we are done.
                 */
                if (pmap_prefault_ok(pmap, addr) == 0) {
                        if (i & 1)
                                noneg = 1;
                        else
                                nopos = 1;
                        if (noneg && nopos)
                                break;
                        continue;
                }

                /*
                 * Follow the VM object chain to obtain the page to be mapped
                 * into the pmap.
                 *
                 * If we reach the terminal object without finding a page
                 * and we determine it would be advantageous, then allocate
                 * a zero-fill page for the base object.  The base object
                 * is guaranteed to be OBJT_DEFAULT for this case.
                 *
                 * To avoid having to check the pager via *haspage*(), we
                 * stop if any non-default object is encountered; e.g. a
                 * vnode or swap object terminates the loop.
                 */
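                /*
                 * Illustrative sketch of the pindex translation performed
                 * by the walk below for a two-deep chain:
                 *
                 *      pindex in object         = index
                 *      pindex in backing object = index +
                 *          (object->backing_object_offset >> PAGE_SHIFT)
                 *
                 * Each further hop adds that object's backing_object_offset
                 * (which must be page-aligned, see the PAGE_MASK check) in
                 * the same way.
                 */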
                index = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
                lobject = object;
                pindex = index;
                pprot = prot;

                KKASSERT(lobject == entry->object.vm_object);
                /*vm_object_hold(lobject); implied */

                while ((m = vm_page_lookup_busy_try(lobject, pindex,
                                                    TRUE, &error)) == NULL) {
                        if (lobject->type != OBJT_DEFAULT)
                                break;
                        if (lobject->backing_object == NULL) {
                                if (vm_fast_fault == 0)
                                        break;
                                if ((prot & VM_PROT_WRITE) == 0 ||
                                    vm_page_count_min(0)) {
                                        break;
                                }

                                /*
                                 * NOTE: Allocated from base object
                                 */
                                m = vm_page_alloc(object, index,
                                                  VM_ALLOC_NORMAL |
                                                  VM_ALLOC_ZERO |
                                                  VM_ALLOC_USE_GD |
                                                  VM_ALLOC_NULL_OK);
                                if (m == NULL)
                                        break;
                                allocated = 1;
                                pprot = prot;
                                /* lobject = object .. not needed */
                                break;
                        }
                        if (lobject->backing_object_offset & PAGE_MASK)
                                break;
                        nobject = lobject->backing_object;
                        vm_object_hold(nobject);
                        KKASSERT(nobject == lobject->backing_object);
                        pindex += lobject->backing_object_offset >> PAGE_SHIFT;
                        if (lobject != object) {
                                vm_object_lock_swap();
                                vm_object_drop(lobject);
                        }
                        lobject = nobject;
                        pprot &= ~VM_PROT_WRITE;
                        vm_object_chain_acquire(lobject, 0);
                }
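
                /*
                 * Note that the walk above cleared VM_PROT_WRITE from pprot
                 * on every hop, so a page found in a backing object is only
                 * ever entered read-only here; the first actual write will
                 * take a normal fault and perform the usual copy-on-write.
                 */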

                /*
                 * NOTE: A non-NULL (m) will be associated with lobject if
                 *       it was found there, otherwise it is probably a
                 *       zero-fill page associated with the base object.
                 *
                 * Give up if no page is available.
                 */
                if (m == NULL) {
                        if (lobject != object) {
#if 0
                                if (object->backing_object != lobject)
                                        vm_object_hold(object->backing_object);
#endif
                                vm_object_chain_release_all(
                                        object->backing_object, lobject);
#if 0
                                if (object->backing_object != lobject)
                                        vm_object_drop(object->backing_object);
#endif
                                vm_object_drop(lobject);
                        }
                        break;
                }

                /*
                 * The object must be marked dirty if we are mapping a
                 * writable page.  m->object is either lobject or object,
                 * both of which are still held.  Do this before we
                 * potentially drop the object.
                 */
                if (pprot & VM_PROT_WRITE)
                        vm_object_set_writeable_dirty(m->object);

                /*
                 * Do not conditionalize on PG_RAM.  If pages are present in
                 * the VM system we assume optimal caching.  If caching is
                 * not optimal the I/O gravy train will be restarted when we
                 * hit an unavailable page.  We do not want to try to restart
                 * the gravy train now because we really don't know how much
                 * of the object has been cached.  The cost for restarting
                 * the gravy train should be low (since accesses will likely
                 * be I/O bound anyway).
                 */
                if (lobject != object) {
#if 0
                        if (object->backing_object != lobject)
                                vm_object_hold(object->backing_object);
#endif
                        vm_object_chain_release_all(object->backing_object,
                                                    lobject);
#if 0
                        if (object->backing_object != lobject)
                                vm_object_drop(object->backing_object);
#endif
                        vm_object_drop(lobject);
                }

                /*
                 * Enter the page into the pmap if appropriate.  If we had
                 * allocated the page we have to place it on a queue.  If not
                 * we just have to make sure it isn't on the cache queue
                 * (pages on the cache queue are not allowed to be mapped).
                 */
                if (allocated) {
                        /*
                         * Page must be zeroed.
                         */
                        if ((m->flags & PG_ZERO) == 0) {
                                vm_page_zero_fill(m);
                        } else {
#ifdef PMAP_DEBUG
                                pmap_page_assertzero(
                                                VM_PAGE_TO_PHYS(m));
#endif
                                vm_page_flag_clear(m, PG_ZERO);
                                mycpu->gd_cnt.v_ozfod++;
                        }
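                        /*
                         * (v_ozfod above counts the optimized case where the
                         * page was already zero, presumably pre-zeroed by
                         * the idle-time zeroing code; v_zfod below counts
                         * all zero-fill-on-demand pages.)
                         */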
                        mycpu->gd_cnt.v_zfod++;
                        m->valid = VM_PAGE_BITS_ALL;

                        /*
                         * Handle dirty page case
                         */
                        if (pprot & VM_PROT_WRITE)
                                vm_set_nosync(m, entry);
                        pmap_enter(pmap, addr, m, pprot, 0, entry);
                        mycpu->gd_cnt.v_vm_faults++;
                        if (curthread->td_lwp)
                                ++curthread->td_lwp->lwp_ru.ru_minflt;
                        vm_page_deactivate(m);
                        if (pprot & VM_PROT_WRITE) {
                                /*vm_object_set_writeable_dirty(m->object);*/
                                vm_set_nosync(m, entry);
                                if (fault_flags & VM_FAULT_DIRTY) {
                                        vm_page_dirty(m);
                                        /*XXX*/
                                        swap_pager_unswapped(m);
                                }
                        }
                        vm_page_wakeup(m);
                } else if (error) {
                        /* couldn't busy page, no wakeup */
                } else if (
                    ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
                    (m->flags & PG_FICTITIOUS) == 0) {
                        /*
                         * A fully valid page not undergoing soft I/O can
                         * be immediately entered into the pmap.
                         */
                        if ((m->queue - m->pc) == PQ_CACHE)
                                vm_page_deactivate(m);
                        if (pprot & VM_PROT_WRITE) {
                                /*vm_object_set_writeable_dirty(m->object);*/
                                vm_set_nosync(m, entry);
                                if (fault_flags & VM_FAULT_DIRTY) {
                                        vm_page_dirty(m);
                                        /*XXX*/
                                        swap_pager_unswapped(m);
                                }
                        }
                        pmap_enter(pmap, addr, m, pprot, 0, entry);
                        mycpu->gd_cnt.v_vm_faults++;
                        if (curthread->td_lwp)
                                ++curthread->td_lwp->lwp_ru.ru_minflt;
                        vm_page_wakeup(m);
                } else {
                        vm_page_wakeup(m);
                }
        }
        vm_object_chain_release(object);
        vm_object_drop(object);
}

/*
 * Quick version of vm_prefault().  The object's token may be held shared,
 * so this path is restricted to terminal objects (no backing chain) and
 * never calls swap_pager_unswapped().
 */
static void
vm_prefault_quick(pmap_t pmap, vm_offset_t addra,
                  vm_map_entry_t entry, int prot, int fault_flags)
{
        struct lwp *lp;
        vm_page_t m;
        vm_offset_t addr;
        vm_pindex_t pindex;
        vm_object_t object;
        int i;
        int noneg;
        int nopos;
        int maxpages;

        /*
         * Get stable max count value, disabled if set to 0
         */
        maxpages = vm_prefault_pages;
        cpu_ccfence();
        if (maxpages <= 0)
                return;

        /*
         * We do not currently prefault mappings that use virtual page
         * tables.  We do not prefault foreign pmaps.
         */
        if (entry->maptype != VM_MAPTYPE_NORMAL)
                return;
        lp = curthread->td_lwp;
        if (lp == NULL || (pmap != vmspace_pmap(lp->lwp_vmspace)))
                return;
        object = entry->object.vm_object;
        if (object->backing_object != NULL)
                return;
        ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));

        /*
         * Limit pre-fault count to 1024 pages.
         */
        if (maxpages > 1024)
                maxpages = 1024;

        noneg = 0;
        nopos = 0;
        for (i = 0; i < maxpages; ++i) {
                int error;

                /*
                 * Calculate the page to pre-fault, stopping the scan in
                 * each direction separately if the limit is reached.
                 */
                if (i & 1) {
                        if (noneg)
                                continue;
                        addr = addra - ((i + 1) >> 1) * PAGE_SIZE;
                } else {
                        if (nopos)
                                continue;
                        addr = addra + ((i + 2) >> 1) * PAGE_SIZE;
                }
                if (addr < entry->start) {
                        noneg = 1;
                        if (noneg && nopos)
                                break;
                        continue;
                }
                if (addr >= entry->end) {
                        nopos = 1;
                        if (noneg && nopos)
                                break;
                        continue;
                }

                /*
                 * Skip pages already mapped, and stop scanning in that
                 * direction.  When the scan terminates in both directions
                 * we are done.
                 */
                if (pmap_prefault_ok(pmap, addr) == 0) {
                        if (i & 1)
                                noneg = 1;
                        else
                                nopos = 1;
                        if (noneg && nopos)
                                break;
                        continue;
                }

                /*
                 * Look up the page to be mapped into the pmap.  This
                 * version of the prefault code only works with terminal
                 * objects.
                 *
                 * WARNING!  We cannot call swap_pager_unswapped() with a
                 *           shared token.
                 */
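                /*
                 * Because of that restriction, the test below deliberately
                 * skips the one case that would require
                 * swap_pager_unswapped():  a PG_SWAPPED page about to be
                 * mapped writable with VM_FAULT_DIRTY set.  Such pages are
                 * left to the normal fault path.
                 */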
                pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;

                m = vm_page_lookup_busy_try(object, pindex, TRUE, &error);
                if (m == NULL || error)
                        continue;

                if (((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
                    (m->flags & PG_FICTITIOUS) == 0 &&
                    ((m->flags & PG_SWAPPED) == 0 ||
                     (prot & VM_PROT_WRITE) == 0 ||
                     (fault_flags & VM_FAULT_DIRTY) == 0)) {
                        /*
                         * A fully valid page not undergoing soft I/O can
                         * be immediately entered into the pmap.
                         */
                        if ((m->queue - m->pc) == PQ_CACHE)
                                vm_page_deactivate(m);
                        if (prot & VM_PROT_WRITE) {
                                vm_object_set_writeable_dirty(m->object);
                                vm_set_nosync(m, entry);
                                if (fault_flags & VM_FAULT_DIRTY) {
                                        vm_page_dirty(m);
                                        /*XXX*/
                                        swap_pager_unswapped(m);
                                }
                        }
                        pmap_enter(pmap, addr, m, prot, 0, entry);
                        mycpu->gd_cnt.v_vm_faults++;
                        if (curthread->td_lwp)
                                ++curthread->td_lwp->lwp_ru.ru_minflt;
                }
                vm_page_wakeup(m);
        }
}
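
/*
 * Both prefault routines are strictly best-effort:  any page which cannot
 * be cheaply resolved is simply skipped and will be brought in by a normal
 * fault when actually touched, which is why neither routine returns an
 * error.
 */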