kernel - refactor vm_page busy
[dragonfly.git] / sys / vm / vm_fault.c
1 /*
2  * Copyright (c) 2003-2014 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  * ---
35  *
36  * Copyright (c) 1991, 1993
37  *      The Regents of the University of California.  All rights reserved.
38  * Copyright (c) 1994 John S. Dyson
39  * All rights reserved.
40  * Copyright (c) 1994 David Greenman
41  * All rights reserved.
42  *
43  *
44  * This code is derived from software contributed to Berkeley by
45  * The Mach Operating System project at Carnegie-Mellon University.
46  *
47  * Redistribution and use in source and binary forms, with or without
48  * modification, are permitted provided that the following conditions
49  * are met:
50  * 1. Redistributions of source code must retain the above copyright
51  *    notice, this list of conditions and the following disclaimer.
52  * 2. Redistributions in binary form must reproduce the above copyright
53  *    notice, this list of conditions and the following disclaimer in the
54  *    documentation and/or other materials provided with the distribution.
55  * 3. Neither the name of the University nor the names of its contributors
56  *    may be used to endorse or promote products derived from this software
57  *    without specific prior written permission.
58  *
59  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
60  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
61  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
62  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
63  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
64  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
65  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
66  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
67  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
68  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
69  * SUCH DAMAGE.
70  *
71  * ---
72  *
73  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
74  * All rights reserved.
75  *
76  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
77  *
78  * Permission to use, copy, modify and distribute this software and
79  * its documentation is hereby granted, provided that both the copyright
80  * notice and this permission notice appear in all copies of the
81  * software, derivative works or modified versions, and any portions
82  * thereof, and that both notices appear in supporting documentation.
83  *
84  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
85  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
86  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
87  *
88  * Carnegie Mellon requests users of this software to return to
89  *
90  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
91  *  School of Computer Science
92  *  Carnegie Mellon University
93  *  Pittsburgh PA 15213-3890
94  *
95  * any improvements or extensions that they make and grant Carnegie the
96  * rights to redistribute these changes.
97  */
98
99 /*
100  *      Page fault handling module.
101  */
102
103 #include <sys/param.h>
104 #include <sys/systm.h>
105 #include <sys/kernel.h>
106 #include <sys/proc.h>
107 #include <sys/vnode.h>
108 #include <sys/resourcevar.h>
109 #include <sys/vmmeter.h>
110 #include <sys/vkernel.h>
111 #include <sys/lock.h>
112 #include <sys/sysctl.h>
113
114 #include <cpu/lwbuf.h>
115
116 #include <vm/vm.h>
117 #include <vm/vm_param.h>
118 #include <vm/pmap.h>
119 #include <vm/vm_map.h>
120 #include <vm/vm_object.h>
121 #include <vm/vm_page.h>
122 #include <vm/vm_pageout.h>
123 #include <vm/vm_kern.h>
124 #include <vm/vm_pager.h>
125 #include <vm/vnode_pager.h>
126 #include <vm/vm_extern.h>
127
128 #include <sys/thread2.h>
129 #include <vm/vm_page2.h>
130
131 struct faultstate {
132         vm_page_t m;
133         vm_object_t object;
134         vm_pindex_t pindex;
135         vm_prot_t prot;
136         vm_page_t first_m;
137         vm_object_t first_object;
138         vm_prot_t first_prot;
139         vm_map_t map;
140         vm_map_entry_t entry;
141         int lookup_still_valid;
142         int hardfault;
143         int fault_flags;
144         int map_generation;
145         int shared;
146         int first_shared;
147         boolean_t wired;
148         struct vnode *vp;
149 };
150
151 static int debug_fault = 0;
152 SYSCTL_INT(_vm, OID_AUTO, debug_fault, CTLFLAG_RW, &debug_fault, 0, "");
153 static int debug_cluster = 0;
154 SYSCTL_INT(_vm, OID_AUTO, debug_cluster, CTLFLAG_RW, &debug_cluster, 0, "");
155 int vm_shared_fault = 1;
156 TUNABLE_INT("vm.shared_fault", &vm_shared_fault);
157 SYSCTL_INT(_vm, OID_AUTO, shared_fault, CTLFLAG_RW, &vm_shared_fault, 0,
158            "Allow shared token on vm_object");
159
160 static int vm_fault_object(struct faultstate *, vm_pindex_t, vm_prot_t, int);
161 static int vm_fault_vpagetable(struct faultstate *, vm_pindex_t *,
162                         vpte_t, int, int);
163 #if 0
164 static int vm_fault_additional_pages (vm_page_t, int, int, vm_page_t *, int *);
165 #endif
166 static void vm_set_nosync(vm_page_t m, vm_map_entry_t entry);
167 static void vm_prefault(pmap_t pmap, vm_offset_t addra,
168                         vm_map_entry_t entry, int prot, int fault_flags);
169 static void vm_prefault_quick(pmap_t pmap, vm_offset_t addra,
170                         vm_map_entry_t entry, int prot, int fault_flags);
171
172 static __inline void
173 release_page(struct faultstate *fs)
174 {
175         vm_page_deactivate(fs->m);
176         vm_page_wakeup(fs->m);
177         fs->m = NULL;
178 }
179
180 /*
181  * NOTE: Once unlocked any cached fs->entry becomes invalid, any reuse
182  *       requires relocking and then checking the timestamp.
183  *
184  * NOTE: vm_map_lock_read() does not bump fs->map->timestamp so we do
185  *       not have to update fs->map_generation here.
186  *
187  * NOTE: This function can fail due to a deadlock against the caller's
188  *       holding of a vm_page BUSY.
189  */
190 static __inline int
191 relock_map(struct faultstate *fs)
192 {
193         int error;
194
195         if (fs->lookup_still_valid == FALSE && fs->map) {
196                 error = vm_map_lock_read_to(fs->map);
197                 if (error == 0)
198                         fs->lookup_still_valid = TRUE;
199         } else {
200                 error = 0;
201         }
202         return error;
203 }
204
205 static __inline void
206 unlock_map(struct faultstate *fs)
207 {
208         if (fs->lookup_still_valid && fs->map) {
209                 vm_map_lookup_done(fs->map, fs->entry, 0);
210                 fs->lookup_still_valid = FALSE;
211         }
212 }
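/*
 * Illustrative sketch (not compiled): one way a caller might pair
 * unlock_map()/relock_map() around a blocking operation and restart
 * the fault if the relock deadlocks or the map changed underneath us.
 * The retry label and the surrounding state are assumptions for this
 * example, not code from this file.
 */
#if 0
        unlock_map(&fs);
        /* ... sleep on a busy page, perform I/O, etc ... */
        if (relock_map(&fs) != 0 ||
            fs.map_generation != fs.map->timestamp) {
                /* deadlocked or the map was changed, start over */
                goto RetryFault;
        }
#endif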
213
214 /*
215  * Clean up after a successful call to vm_fault_object() so another call
216  * to vm_fault_object() can be made.
217  */
218 static void
219 _cleanup_successful_fault(struct faultstate *fs, int relock)
220 {
221         /*
222          * We allocated a junk page for a COW operation that did
223          * not occur, the page must be freed.
224          */
225         if (fs->object != fs->first_object) {
226                 KKASSERT(fs->first_shared == 0);
227                 vm_page_free(fs->first_m);
228                 vm_object_pip_wakeup(fs->object);
229                 fs->first_m = NULL;
230         }
231
232         /*
233          * Reset fs->object.
234          */
235         fs->object = fs->first_object;
236         if (relock && fs->lookup_still_valid == FALSE) {
237                 if (fs->map)
238                         vm_map_lock_read(fs->map);
239                 fs->lookup_still_valid = TRUE;
240         }
241 }
242
243 static void
244 _unlock_things(struct faultstate *fs, int dealloc)
245 {
246         _cleanup_successful_fault(fs, 0);
247         if (dealloc) {
248                 /*vm_object_deallocate(fs->first_object);*/
249                 /*fs->first_object = NULL; drop used later on */
250         }
251         unlock_map(fs); 
252         if (fs->vp != NULL) { 
253                 vput(fs->vp);
254                 fs->vp = NULL;
255         }
256 }
257
258 #define unlock_things(fs) _unlock_things(fs, 0)
259 #define unlock_and_deallocate(fs) _unlock_things(fs, 1)
260 #define cleanup_successful_fault(fs) _cleanup_successful_fault(fs, 1)
261
262 /*
263  * TRYPAGER 
264  *
265  * Determine if the pager for the current object *might* contain the page.
266  *
267  * We only need to try the pager if this is not a default object (default
268  * objects are zero-fill and have no real pager), and if we are not taking
269  * a wiring fault or if the FS entry is wired.
270  */
271 #define TRYPAGER(fs)    \
272                 (fs->object->type != OBJT_DEFAULT && \
273                 (((fs->fault_flags & VM_FAULT_WIRE_MASK) == 0) || fs->wired))
274
275 /*
276  * vm_fault:
277  *
278  * Handle a page fault occurring at the given address, requiring the given
279  * permissions, in the map specified.  If successful, the page is inserted
280  * into the associated physical map.
281  *
282  * NOTE: The given address should be truncated to the proper page address.
283  *
284  * KERN_SUCCESS is returned if the page fault is handled; otherwise,
285  * a standard error specifying why the fault is fatal is returned.
286  *
287  * The map in question must be referenced, and remains so.
288  * The caller may hold no locks.
289  * No other requirements.
290  */
291 int
292 vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type, int fault_flags)
293 {
294         int result;
295         vm_pindex_t first_pindex;
296         struct faultstate fs;
297         struct lwp *lp;
298         struct proc *p;
299         thread_t td;
300         struct vm_map_ilock ilock;
301         int didilock;
302         int growstack;
303         int retry = 0;
304         int inherit_prot;
305
306         inherit_prot = fault_type & VM_PROT_NOSYNC;
307         fs.hardfault = 0;
308         fs.fault_flags = fault_flags;
309         fs.vp = NULL;
310         fs.shared = vm_shared_fault;
311         fs.first_shared = vm_shared_fault;
312         growstack = 1;
313
314         /*
315          * vm_map interactions
316          */
317         td = curthread;
318         if ((lp = td->td_lwp) != NULL)
319                 lp->lwp_flags |= LWP_PAGING;
320
321 RetryFault:
322         /*
323          * Find the vm_map_entry representing the backing store and resolve
324          * the top level object and page index.  This may have the side
325          * effect of executing a copy-on-write on the map entry,
326          * creating a shadow object, or splitting an anonymous entry for
327          * performance, but will not COW any actual VM pages.
328          *
329          * On success fs.map is left read-locked and various other fields 
330          * are initialized but not otherwise referenced or locked.
331          *
332          * NOTE!  vm_map_lookup will try to upgrade the fault_type to
333          *        VM_FAULT_WRITE if the map entry is a virtual page table
334  *        and also writable, so we can set the 'A' (accessed) bit in
335          *        the virtual page table entry.
336          */
337         fs.map = map;
338         result = vm_map_lookup(&fs.map, vaddr, fault_type,
339                                &fs.entry, &fs.first_object,
340                                &first_pindex, &fs.first_prot, &fs.wired);
341
342         /*
343          * If the lookup failed or the map protections are incompatible,
344          * the fault generally fails.
345          *
346          * The failure could be due to TDF_NOFAULT if vm_map_lookup()
347          * tried to do a COW fault.
348          *
349          * If the caller is trying to do a user wiring we have more work
350          * to do.
351          */
352         if (result != KERN_SUCCESS) {
353                 if (result == KERN_FAILURE_NOFAULT) {
354                         result = KERN_FAILURE;
355                         goto done;
356                 }
357                 if (result != KERN_PROTECTION_FAILURE ||
358                     (fs.fault_flags & VM_FAULT_WIRE_MASK) != VM_FAULT_USER_WIRE)
359                 {
360                         if (result == KERN_INVALID_ADDRESS && growstack &&
361                             map != &kernel_map && curproc != NULL) {
362                                 result = vm_map_growstack(map, vaddr);
363                                 if (result == KERN_SUCCESS) {
364                                         growstack = 0;
365                                         ++retry;
366                                         goto RetryFault;
367                                 }
368                                 result = KERN_FAILURE;
369                         }
370                         goto done;
371                 }
372
373                 /*
374                  * If we are user-wiring a r/w segment, and it is COW, then
375                  * we need to do the COW operation.  Note that we don't
376                  * currently COW RO sections, because it is NOT desirable
377                  * to COW .text.  We simply keep .text from ever being COW'ed
378                  * and take the heat that one cannot debug wired .text sections.
379                  */
380                 result = vm_map_lookup(&fs.map, vaddr,
381                                        VM_PROT_READ|VM_PROT_WRITE|
382                                         VM_PROT_OVERRIDE_WRITE,
383                                        &fs.entry, &fs.first_object,
384                                        &first_pindex, &fs.first_prot,
385                                        &fs.wired);
386                 if (result != KERN_SUCCESS) {
387                         /* could also be KERN_FAILURE_NOFAULT */
388                         result = KERN_FAILURE;
389                         goto done;
390                 }
391
392                 /*
393                  * If we don't COW now, on a user wire, the user will never
394                  * be able to write to the mapping.  If we don't make this
395                  * restriction, the bookkeeping would be nearly impossible.
396                  *
397                  * XXX We have a shared lock; this will have an MP race but
398                  * I don't see how it can hurt anything.
399                  */
400                 if ((fs.entry->protection & VM_PROT_WRITE) == 0) {
401                         atomic_clear_char(&fs.entry->max_protection,
402                                           VM_PROT_WRITE);
403                 }
404         }
405
406         /*
407          * fs.map is read-locked
408          *
409          * Misc checks.  Save the map generation number to detect races.
410          */
411         fs.map_generation = fs.map->timestamp;
412         fs.lookup_still_valid = TRUE;
413         fs.first_m = NULL;
414         fs.object = fs.first_object;    /* so unlock_and_deallocate works */
415         fs.prot = fs.first_prot;        /* default (used by uksmap) */
416
417         if (fs.entry->eflags & (MAP_ENTRY_NOFAULT | MAP_ENTRY_KSTACK)) {
418                 if (fs.entry->eflags & MAP_ENTRY_NOFAULT) {
419                         panic("vm_fault: fault on nofault entry, addr: %p",
420                               (void *)vaddr);
421                 }
422                 if ((fs.entry->eflags & MAP_ENTRY_KSTACK) &&
423                     vaddr >= fs.entry->start &&
424                     vaddr < fs.entry->start + PAGE_SIZE) {
425                         panic("vm_fault: fault on stack guard, addr: %p",
426                               (void *)vaddr);
427                 }
428         }
429
430         /*
431          * A user-kernel shared map has no VM object and bypasses
432          * everything.  We execute the uksmap function with a temporary
433          * fictitious vm_page.  The address is directly mapped with no
434          * management.
435          */
436         if (fs.entry->maptype == VM_MAPTYPE_UKSMAP) {
437                 struct vm_page fakem;
438
439                 bzero(&fakem, sizeof(fakem));
440                 fakem.pindex = first_pindex;
441                 fakem.flags = PG_FICTITIOUS | PG_UNMANAGED;
442                 fakem.busy_count = PBUSY_LOCKED;
443                 fakem.valid = VM_PAGE_BITS_ALL;
444                 fakem.pat_mode = VM_MEMATTR_DEFAULT;
445                 if (fs.entry->object.uksmap(fs.entry->aux.dev, &fakem)) {
446                         result = KERN_FAILURE;
447                         unlock_things(&fs);
448                         goto done2;
449                 }
450                 pmap_enter(fs.map->pmap, vaddr, &fakem, fs.prot | inherit_prot,
451                            fs.wired, fs.entry);
452                 goto done_success;
453         }
454
455         /*
456          * A system map entry may return a NULL object.  No object means
457          * no pager means an unrecoverable kernel fault.
458          */
459         if (fs.first_object == NULL) {
460                 panic("vm_fault: unrecoverable fault at %p in entry %p",
461                         (void *)vaddr, fs.entry);
462         }
463
464         /*
465          * Fail here if not a trivial anonymous page fault and TDF_NOFAULT
466          * is set.
467          *
468          * Unfortunately a deadlock can occur if we are forced to page-in
469          * from swap, but diving all the way into the vm_pager_get_page()
470          * function to find out is too much.  Just check the object type.
471          *
472          * The deadlock is a CAM deadlock on a busy VM page when trying
473          * to finish an I/O if another process gets stuck in
474          * vop_helper_read_shortcut() due to a swap fault.
475          */
476         if ((td->td_flags & TDF_NOFAULT) &&
477             (retry ||
478              fs.first_object->type == OBJT_VNODE ||
479              fs.first_object->type == OBJT_SWAP ||
480              fs.first_object->backing_object)) {
481                 result = KERN_FAILURE;
482                 unlock_things(&fs);
483                 goto done2;
484         }
485
486         /*
487          * If the entry is wired we cannot change the page protection.
488          */
489         if (fs.wired)
490                 fault_type = fs.first_prot;
491
492         /*
493          * We generally want to avoid unnecessary exclusive modes on backing
494          * and terminal objects because this can seriously interfere with
495          * heavily fork()'d processes (particularly /bin/sh scripts).
496          *
497          * However, we also want to avoid unnecessary retries due to needed
498          * shared->exclusive promotion for common faults.  Exclusive mode is
499          * always needed if any page insertion, rename, or free occurs in an
500          * object (and also indirectly if any I/O is done).
501          *
502          * The main issue here is going to be fs.first_shared.  If the
503          * first_object has a backing object which isn't shadowed and the
504          * process is single-threaded we might as well use an exclusive
505          * lock/chain right off the bat.
506          */
507         if (fs.first_shared && fs.first_object->backing_object &&
508             LIST_EMPTY(&fs.first_object->shadow_head) &&
509             td->td_proc && td->td_proc->p_nthreads == 1) {
510                 fs.first_shared = 0;
511         }
512
513         /*
514          * VM_FAULT_UNSWAP - swap_pager_unswapped() needs an exclusive object
515          * VM_FAULT_DIRTY  - may require swap_pager_unswapped() later, but
516          *                   we can try shared first.
517          */
518         if (fault_flags & VM_FAULT_UNSWAP) {
519                 fs.first_shared = 0;
520         }
521
522         /*
523          * Obtain a top-level object lock, shared or exclusive depending
524          * on fs.first_shared.  If a shared lock winds up being insufficient
525          * we will retry with an exclusive lock.
526          *
527          * The vnode pager lock is always shared.
528          */
529         if (fs.first_shared)
530                 vm_object_hold_shared(fs.first_object);
531         else
532                 vm_object_hold(fs.first_object);
533         if (fs.vp == NULL)
534                 fs.vp = vnode_pager_lock(fs.first_object);
535
536         /*
537          * The page we want is at (first_object, first_pindex), but if the
538          * vm_map_entry is VM_MAPTYPE_VPAGETABLE we have to traverse the
539          * page table to figure out the actual pindex.
540          *
541          * NOTE!  DEVELOPMENT IN PROGRESS, THIS IS AN INITIAL IMPLEMENTATION
542          * ONLY
543          */
544         didilock = 0;
545         if (fs.entry->maptype == VM_MAPTYPE_VPAGETABLE) {
546                 vm_map_interlock(fs.map, &ilock, vaddr, vaddr + PAGE_SIZE);
547                 didilock = 1;
548                 result = vm_fault_vpagetable(&fs, &first_pindex,
549                                              fs.entry->aux.master_pde,
550                                              fault_type, 1);
551                 if (result == KERN_TRY_AGAIN) {
552                         vm_map_deinterlock(fs.map, &ilock);
553                         vm_object_drop(fs.first_object);
554                         ++retry;
555                         goto RetryFault;
556                 }
557                 if (result != KERN_SUCCESS) {
558                         vm_map_deinterlock(fs.map, &ilock);
559                         goto done;
560                 }
561         }
562
563         /*
564          * Now we have the actual (object, pindex), fault in the page.  If
565          * vm_fault_object() fails it will unlock and deallocate the FS
566          * data.   If it succeeds everything remains locked and fs->object
567          * will have an additional PIP count if it is not equal to
568          * fs->first_object
569          *
570          * vm_fault_object will set fs->prot for the pmap operation.  It is
571          * allowed to set VM_PROT_WRITE even if fault_type == VM_PROT_READ when the
572          * page can be safely written.  However, it will force a read-only
573          * mapping for a read fault if the memory is managed by a virtual
574          * page table.
575          *
576          * If the fault code uses the shared object lock shortcut
577          * we must not try to burst (we can't allocate VM pages).
578          */
579         result = vm_fault_object(&fs, first_pindex, fault_type, 1);
580
581         if (debug_fault > 0) {
582                 --debug_fault;
583                 kprintf("VM_FAULT result %d addr=%jx type=%02x flags=%02x "
584                         "fs.m=%p fs.prot=%02x fs.wired=%02x fs.entry=%p\n",
585                         result, (intmax_t)vaddr, fault_type, fault_flags,
586                         fs.m, fs.prot, fs.wired, fs.entry);
587         }
588
589         if (result == KERN_TRY_AGAIN) {
590                 if (didilock)
591                         vm_map_deinterlock(fs.map, &ilock);
592                 vm_object_drop(fs.first_object);
593                 ++retry;
594                 goto RetryFault;
595         }
596         if (result != KERN_SUCCESS) {
597                 if (didilock)
598                         vm_map_deinterlock(fs.map, &ilock);
599                 goto done;
600         }
601
602         /*
603          * On success vm_fault_object() does not unlock or deallocate, and fs.m
604          * will contain a busied page.
605          *
606          * Enter the page into the pmap and do pmap-related adjustments.
607          */
608         KKASSERT(fs.lookup_still_valid == TRUE);
609         vm_page_flag_set(fs.m, PG_REFERENCED);
610         pmap_enter(fs.map->pmap, vaddr, fs.m, fs.prot | inherit_prot,
611                    fs.wired, fs.entry);
612
613         if (didilock)
614                 vm_map_deinterlock(fs.map, &ilock);
615
616         /*KKASSERT(fs.m->queue == PQ_NONE); page-in op may deactivate page */
617         KKASSERT(fs.m->busy_count & PBUSY_LOCKED);
618
619         /*
620          * If the page is not wired down, then put it where the pageout daemon
621          * can find it.
622          */
623         if (fs.fault_flags & VM_FAULT_WIRE_MASK) {
624                 if (fs.wired)
625                         vm_page_wire(fs.m);
626                 else
627                         vm_page_unwire(fs.m, 1);
628         } else {
629                 vm_page_activate(fs.m);
630         }
631         vm_page_wakeup(fs.m);
632
633         /*
634          * Burst in a few more pages if possible.  The fs.map should still
635          * be locked.  To avoid interlocking against a vnode->getblk
636          * operation we had to be sure to unbusy our primary vm_page above
637          * first.
638          *
639          * A normal burst can continue down backing store but should only
640          * execute if we are holding an exclusive lock, otherwise the exclusive
641          * locks the burst code acquires might cause excessive SMP collisions.
642          *
643          * A quick burst can be utilized when there is no backing object
644          * (i.e. a shared file mmap).
645          */
646         if ((fault_flags & VM_FAULT_BURST) &&
647             (fs.fault_flags & VM_FAULT_WIRE_MASK) == 0 &&
648             fs.wired == 0) {
649                 if (fs.first_shared == 0 && fs.shared == 0) {
650                         vm_prefault(fs.map->pmap, vaddr,
651                                     fs.entry, fs.prot, fault_flags);
652                 } else {
653                         vm_prefault_quick(fs.map->pmap, vaddr,
654                                           fs.entry, fs.prot, fault_flags);
655                 }
656         }
657
658 done_success:
659         mycpu->gd_cnt.v_vm_faults++;
660         if (td->td_lwp)
661                 ++td->td_lwp->lwp_ru.ru_minflt;
662
663         /*
664          * Unlock everything, and return
665          */
666         unlock_things(&fs);
667
668         if (td->td_lwp) {
669                 if (fs.hardfault) {
670                         td->td_lwp->lwp_ru.ru_majflt++;
671                 } else {
672                         td->td_lwp->lwp_ru.ru_minflt++;
673                 }
674         }
675
676         /*vm_object_deallocate(fs.first_object);*/
677         /*fs.m = NULL; */
678         /*fs.first_object = NULL; must still drop later */
679
680         result = KERN_SUCCESS;
681 done:
682         if (fs.first_object)
683                 vm_object_drop(fs.first_object);
684 done2:
685         if (lp)
686                 lp->lwp_flags &= ~LWP_PAGING;
687
688 #if !defined(NO_SWAPPING)
689         /*
690          * Check the process RSS limit and force deactivation and
691          * (asynchronous) paging if necessary.  This is a complex operation,
692          * only do it for direct user-mode faults, for now.
693          *
694          * To reduce overhead implement approximately a ~16MB hysteresis.
695          */
696         p = td->td_proc;
697         if ((fault_flags & VM_FAULT_USERMODE) && lp &&
698             p->p_limit && map->pmap && vm_pageout_memuse_mode >= 1 &&
699             map != &kernel_map) {
700                 vm_pindex_t limit;
701                 vm_pindex_t size;
702
703                 limit = OFF_TO_IDX(qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
704                                         p->p_rlimit[RLIMIT_RSS].rlim_max));
705                 size = pmap_resident_tlnw_count(map->pmap);
706                 if (limit >= 0 && size > 4096 && size - 4096 >= limit) {
707                         vm_pageout_map_deactivate_pages(map, limit);
708                 }
709         }
710 #endif
711
712         return (result);
713 }
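/*
 * Illustrative sketch (not compiled): roughly how machine-dependent
 * trap code might invoke vm_fault() for a user-mode access.  The
 * function name and its arguments are assumptions for this example;
 * the real callers live in the platform trap handlers.
 */
#if 0
static int
example_user_fault(struct lwp *lp, vm_offset_t va, int is_write)
{
        vm_prot_t ftype;
        int rv;

        ftype = is_write ? (VM_PROT_READ | VM_PROT_WRITE) : VM_PROT_READ;

        /* vm_fault() expects a page-aligned address */
        rv = vm_fault(&lp->lwp_vmspace->vm_map, trunc_page(va),
                      ftype, VM_FAULT_NORMAL | VM_FAULT_USERMODE);
        return (rv == KERN_SUCCESS) ? 0 : rv;
}
#endif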
714
715 /*
716  * Fault in the specified virtual address in the current process map, 
717  * returning a held VM page or NULL.  See vm_fault_page() for more 
718  * information.
719  *
720  * No requirements.
721  */
722 vm_page_t
723 vm_fault_page_quick(vm_offset_t va, vm_prot_t fault_type,
724                     int *errorp, int *busyp)
725 {
726         struct lwp *lp = curthread->td_lwp;
727         vm_page_t m;
728
729         m = vm_fault_page(&lp->lwp_vmspace->vm_map, va, 
730                           fault_type, VM_FAULT_NORMAL,
731                           errorp, busyp);
732         return(m);
733 }
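/*
 * Illustrative sketch (not compiled): reading one byte from a user
 * address via vm_fault_page_quick().  With busyp == NULL and
 * VM_PROT_READ the returned page is only held, so it is released
 * with vm_page_unhold().  The helper name is an assumption for this
 * example.
 */
#if 0
static int
example_peek_user_byte(vm_offset_t uva, uint8_t *valp)
{
        struct lwbuf lwb_cache;
        struct lwbuf *lwb;
        vm_page_t m;
        int error;

        m = vm_fault_page_quick(trunc_page(uva), VM_PROT_READ, &error, NULL);
        if (m == NULL)
                return (error);
        lwb = lwbuf_alloc(m, &lwb_cache);
        *valp = *((uint8_t *)lwbuf_kva(lwb) + (uva & PAGE_MASK));
        lwbuf_free(lwb);
        vm_page_unhold(m);
        return (0);
}
#endif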
734
735 /*
736  * Fault in the specified virtual address in the specified map, doing all
737  * necessary manipulation of the object store and all necessary I/O.  Return
738  * a held VM page or NULL, and set *errorp.  The related pmap is not
739  * updated.
740  *
741  * If busyp is not NULL then *busyp will be set to TRUE if this routine
742  * decides to return a busied page (i.e. VM_PROT_WRITE was specified), or
743  * FALSE if it does not (VM_PROT_WRITE was not specified).  If busyp is
744  * NULL the returned page is only held.
745  *
746  * If the caller has no intention of writing to the page's contents, busyp
747  * can be passed as NULL along with VM_PROT_WRITE to force a COW operation
748  * without busying the page.
749  *
750  * The returned page will also be marked PG_REFERENCED.
751  *
752  * If the page cannot be faulted writable and VM_PROT_WRITE was specified, an
753  * error will be returned.
754  *
755  * No requirements.
756  */
757 vm_page_t
758 vm_fault_page(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
759               int fault_flags, int *errorp, int *busyp)
760 {
761         vm_pindex_t first_pindex;
762         struct faultstate fs;
763         int result;
764         int retry;
765         int growstack;
766         vm_prot_t orig_fault_type = fault_type;
767
768         retry = 0;
769         fs.hardfault = 0;
770         fs.fault_flags = fault_flags;
771         KKASSERT((fault_flags & VM_FAULT_WIRE_MASK) == 0);
772
773         /*
774          * Dive the pmap (concurrency possible).  If we find the
775          * appropriate page we can terminate early and quickly.
776          *
777          * This works great for normal programs but will always return
778          * NULL for host lookups of vkernel maps in VMM mode.
779          *
780          * NOTE: pmap_fault_page_quick() might not busy the page.  If
781          *       VM_PROT_WRITE or VM_PROT_OVERRIDE_WRITE is set in
782          *       fault_type and pmap_fault_page_quick() returns non-NULL,
783          *       it will safely dirty the returned vm_page_t for us.  We
784          *       cannot safely dirty it here (it might not be busy).
785          */
786         fs.m = pmap_fault_page_quick(map->pmap, vaddr, fault_type, busyp);
787         if (fs.m) {
788                 *errorp = 0;
789                 return(fs.m);
790         }
791
792         /*
793          * Otherwise take a concurrency hit and do a formal page
794          * fault.
795          */
796         fs.vp = NULL;
797         fs.shared = vm_shared_fault;
798         fs.first_shared = vm_shared_fault;
799         growstack = 1;
800
801         /*
802          * VM_FAULT_UNSWAP - swap_pager_unswapped() needs an exclusive object
803          * VM_FAULT_DIRTY  - may require swap_pager_unswapped() later, but
804          *                   we can try shared first.
805          */
806         if (fault_flags & VM_FAULT_UNSWAP) {
807                 fs.first_shared = 0;
808         }
809
810 RetryFault:
811         /*
812          * Find the vm_map_entry representing the backing store and resolve
813          * the top level object and page index.  This may have the side
814          * effect of executing a copy-on-write on the map entry and/or
815          * creating a shadow object, but will not COW any actual VM pages.
816          *
817          * On success fs.map is left read-locked and various other fields 
818          * are initialized but not otherwise referenced or locked.
819          *
820          * NOTE!  vm_map_lookup will upgrade the fault_type to VM_FAULT_WRITE
821          *        if the map entry is a virtual page table and also writable,
822  *        so we can set the 'A' (accessed) bit in the virtual page table
823          *        entry.
824          */
825         fs.map = map;
826         result = vm_map_lookup(&fs.map, vaddr, fault_type,
827                                &fs.entry, &fs.first_object,
828                                &first_pindex, &fs.first_prot, &fs.wired);
829
830         if (result != KERN_SUCCESS) {
831                 if (result == KERN_FAILURE_NOFAULT) {
832                         *errorp = KERN_FAILURE;
833                         fs.m = NULL;
834                         goto done;
835                 }
836                 if (result != KERN_PROTECTION_FAILURE ||
837                     (fs.fault_flags & VM_FAULT_WIRE_MASK) != VM_FAULT_USER_WIRE)
838                 {
839                         if (result == KERN_INVALID_ADDRESS && growstack &&
840                             map != &kernel_map && curproc != NULL) {
841                                 result = vm_map_growstack(map, vaddr);
842                                 if (result == KERN_SUCCESS) {
843                                         growstack = 0;
844                                         ++retry;
845                                         goto RetryFault;
846                                 }
847                                 result = KERN_FAILURE;
848                         }
849                         fs.m = NULL;
850                         *errorp = result;
851                         goto done;
852                 }
853
854                 /*
855                  * If we are user-wiring a r/w segment, and it is COW, then
856                  * we need to do the COW operation.  Note that we don't
857                  * currently COW RO sections, because it is NOT desirable
858                  * to COW .text.  We simply keep .text from ever being COW'ed
859                  * and take the heat that one cannot debug wired .text sections.
860                  */
861                 result = vm_map_lookup(&fs.map, vaddr,
862                                        VM_PROT_READ|VM_PROT_WRITE|
863                                         VM_PROT_OVERRIDE_WRITE,
864                                        &fs.entry, &fs.first_object,
865                                        &first_pindex, &fs.first_prot,
866                                        &fs.wired);
867                 if (result != KERN_SUCCESS) {
868                         /* could also be KERN_FAILURE_NOFAULT */
869                         *errorp = KERN_FAILURE;
870                         fs.m = NULL;
871                         goto done;
872                 }
873
874                 /*
875                  * If we don't COW now, on a user wire, the user will never
876                  * be able to write to the mapping.  If we don't make this
877                  * restriction, the bookkeeping would be nearly impossible.
878                  *
879                  * XXX We have a shared lock; this will have an MP race but
880                  * I don't see how it can hurt anything.
881                  */
882                 if ((fs.entry->protection & VM_PROT_WRITE) == 0) {
883                         atomic_clear_char(&fs.entry->max_protection,
884                                           VM_PROT_WRITE);
885                 }
886         }
887
888         /*
889          * fs.map is read-locked
890          *
891          * Misc checks.  Save the map generation number to detect races.
892          */
893         fs.map_generation = fs.map->timestamp;
894         fs.lookup_still_valid = TRUE;
895         fs.first_m = NULL;
896         fs.object = fs.first_object;    /* so unlock_and_deallocate works */
897
898         if (fs.entry->eflags & MAP_ENTRY_NOFAULT) {
899                 panic("vm_fault: fault on nofault entry, addr: %lx",
900                     (u_long)vaddr);
901         }
902
903         /*
904          * A user-kernel shared map has no VM object and bypasses
905          * everything.  We execute the uksmap function with a temporary
906          * fictitious vm_page.  The address is directly mapped with no
907          * management.
908          */
909         if (fs.entry->maptype == VM_MAPTYPE_UKSMAP) {
910                 struct vm_page fakem;
911
912                 bzero(&fakem, sizeof(fakem));
913                 fakem.pindex = first_pindex;
914                 fakem.flags = PG_FICTITIOUS | PG_UNMANAGED;
915                 fakem.busy_count = PBUSY_LOCKED;
916                 fakem.valid = VM_PAGE_BITS_ALL;
917                 fakem.pat_mode = VM_MEMATTR_DEFAULT;
918                 if (fs.entry->object.uksmap(fs.entry->aux.dev, &fakem)) {
919                         *errorp = KERN_FAILURE;
920                         fs.m = NULL;
921                         unlock_things(&fs);
922                         goto done2;
923                 }
924                 fs.m = PHYS_TO_VM_PAGE(fakem.phys_addr);
925                 vm_page_hold(fs.m);
926                 if (busyp)
927                         *busyp = 0;     /* don't need to busy R or W */
928                 unlock_things(&fs);
929                 *errorp = 0;
930                 goto done;
931         }
932
933
934         /*
935          * A system map entry may return a NULL object.  No object means
936          * no pager means an unrecoverable kernel fault.
937          */
938         if (fs.first_object == NULL) {
939                 panic("vm_fault: unrecoverable fault at %p in entry %p",
940                         (void *)vaddr, fs.entry);
941         }
942
943         /*
944          * Fail here if not a trivial anonymous page fault and TDF_NOFAULT
945          * is set.
946          *
947          * Unfortunately a deadlock can occur if we are forced to page-in
948          * from swap, but diving all the way into the vm_pager_get_page()
949          * function to find out is too much.  Just check the object type.
950          */
951         if ((curthread->td_flags & TDF_NOFAULT) &&
952             (retry ||
953              fs.first_object->type == OBJT_VNODE ||
954              fs.first_object->type == OBJT_SWAP ||
955              fs.first_object->backing_object)) {
956                 *errorp = KERN_FAILURE;
957                 unlock_things(&fs);
958                 fs.m = NULL;
959                 goto done2;
960         }
961
962         /*
963          * If the entry is wired we cannot change the page protection.
964          */
965         if (fs.wired)
966                 fault_type = fs.first_prot;
967
968         /*
969          * Make a reference to this object to prevent its disposal while we
970          * are messing with it.  Once we have the reference, the map is free
971          * to be diddled.  Since objects reference their shadows (and copies),
972          * they will stay around as well.
973          *
974          * The reference should also prevent an unexpected collapse of the
975          * parent that might move pages from the current object into the
976          * parent unexpectedly, resulting in corruption.
977          *
978          * Bump the paging-in-progress count to prevent size changes (e.g.
979          * truncation operations) during I/O.  This must be done after
980          * obtaining the vnode lock in order to avoid possible deadlocks.
981          */
982         if (fs.first_shared)
983                 vm_object_hold_shared(fs.first_object);
984         else
985                 vm_object_hold(fs.first_object);
986         if (fs.vp == NULL)
987                 fs.vp = vnode_pager_lock(fs.first_object);      /* shared */
988
989         /*
990          * The page we want is at (first_object, first_pindex), but if the
991          * vm_map_entry is VM_MAPTYPE_VPAGETABLE we have to traverse the
992          * page table to figure out the actual pindex.
993          *
994          * NOTE!  DEVELOPMENT IN PROGRESS, THIS IS AN INITIAL IMPLEMENTATION
995          * ONLY
996          */
997         if (fs.entry->maptype == VM_MAPTYPE_VPAGETABLE) {
998                 result = vm_fault_vpagetable(&fs, &first_pindex,
999                                              fs.entry->aux.master_pde,
1000                                              fault_type, 1);
1001                 if (result == KERN_TRY_AGAIN) {
1002                         vm_object_drop(fs.first_object);
1003                         ++retry;
1004                         goto RetryFault;
1005                 }
1006                 if (result != KERN_SUCCESS) {
1007                         *errorp = result;
1008                         fs.m = NULL;
1009                         goto done;
1010                 }
1011         }
1012
1013         /*
1014          * Now we have the actual (object, pindex), fault in the page.  If
1015          * vm_fault_object() fails it will unlock and deallocate the FS
1016          * data.   If it succeeds everything remains locked and fs->object
1017          * will have an additional PIP count if it is not equal to
1018          * fs->first_object
1019          */
1020         fs.m = NULL;
1021         result = vm_fault_object(&fs, first_pindex, fault_type, 1);
1022
1023         if (result == KERN_TRY_AGAIN) {
1024                 vm_object_drop(fs.first_object);
1025                 ++retry;
1026                 goto RetryFault;
1027         }
1028         if (result != KERN_SUCCESS) {
1029                 *errorp = result;
1030                 fs.m = NULL;
1031                 goto done;
1032         }
1033
1034         if ((orig_fault_type & VM_PROT_WRITE) &&
1035             (fs.prot & VM_PROT_WRITE) == 0) {
1036                 *errorp = KERN_PROTECTION_FAILURE;
1037                 unlock_and_deallocate(&fs);
1038                 fs.m = NULL;
1039                 goto done;
1040         }
1041
1042         /*
1043          * DO NOT UPDATE THE PMAP!!!  This function may be called for
1044          * a pmap unrelated to the current process pmap, in which case
1045          * the current cpu core will not be listed in the pmap's pm_active
1046          * mask.  Thus invalidation interlocks will fail to work properly.
1047          *
1048          * (for example, 'ps' uses procfs to read program arguments from
1049          * each process's stack).
1050          *
1051          * In addition to the above this function will be called to acquire
1052          * a page that might already be faulted in, re-faulting it
1053          * continuously is a waste of time.
1054          *
1055          * XXX could this have been the cause of our random seg-fault
1056          *     issues?  procfs accesses user stacks.
1057          */
1058         vm_page_flag_set(fs.m, PG_REFERENCED);
1059 #if 0
1060         pmap_enter(fs.map->pmap, vaddr, fs.m, fs.prot, fs.wired, NULL);
1061         mycpu->gd_cnt.v_vm_faults++;
1062         if (curthread->td_lwp)
1063                 ++curthread->td_lwp->lwp_ru.ru_minflt;
1064 #endif
1065
1066         /*
1067          * On success vm_fault_object() does not unlock or deallocate, and fs.m
1068          * will contain a busied page.  So we must unlock here after having
1069          * messed with the pmap.
1070          */
1071         unlock_things(&fs);
1072
1073         /*
1074          * Return a held page.  We are not doing any pmap manipulation so do
1075          * not set PG_MAPPED.  However, adjust the page flags according to
1076          * the fault type because the caller may not use a managed pmapping
1077          * (so we don't want to lose the fact that the page will be dirtied
1078          * if a write fault was specified).
1079          */
1080         if (fault_type & VM_PROT_WRITE)
1081                 vm_page_dirty(fs.m);
1082         vm_page_activate(fs.m);
1083
1084         if (curthread->td_lwp) {
1085                 if (fs.hardfault) {
1086                         curthread->td_lwp->lwp_ru.ru_majflt++;
1087                 } else {
1088                         curthread->td_lwp->lwp_ru.ru_minflt++;
1089                 }
1090         }
1091
1092         /*
1093          * Unlock everything, and return the held or busied page.
1094          */
1095         if (busyp) {
1096                 if (fault_type & (VM_PROT_WRITE|VM_PROT_OVERRIDE_WRITE)) {
1097                         vm_page_dirty(fs.m);
1098                         *busyp = 1;
1099                 } else {
1100                         *busyp = 0;
1101                         vm_page_hold(fs.m);
1102                         vm_page_wakeup(fs.m);
1103                 }
1104         } else {
1105                 vm_page_hold(fs.m);
1106                 vm_page_wakeup(fs.m);
1107         }
1108         /*vm_object_deallocate(fs.first_object);*/
1109         /*fs.first_object = NULL; */
1110         *errorp = 0;
1111
1112 done:
1113         if (fs.first_object)
1114                 vm_object_drop(fs.first_object);
1115 done2:
1116         return(fs.m);
1117 }
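/*
 * Illustrative sketch (not compiled): writing one byte through a page
 * obtained with vm_fault_page() and a non-NULL busyp.  A write fault
 * normally returns a busied page (*busyp set), which is released with
 * vm_page_wakeup(); a held page is released with vm_page_unhold().
 * The helper name is an assumption for this example.
 */
#if 0
static int
example_poke_user_byte(vm_map_t map, vm_offset_t uva, uint8_t val)
{
        struct lwbuf lwb_cache;
        struct lwbuf *lwb;
        vm_page_t m;
        int error;
        int busy;

        m = vm_fault_page(map, trunc_page(uva), VM_PROT_WRITE,
                          VM_FAULT_NORMAL, &error, &busy);
        if (m == NULL)
                return (error);
        lwb = lwbuf_alloc(m, &lwb_cache);
        *((uint8_t *)lwbuf_kva(lwb) + (uva & PAGE_MASK)) = val;
        lwbuf_free(lwb);
        if (busy)
                vm_page_wakeup(m);
        else
                vm_page_unhold(m);
        return (0);
}
#endif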
1118
1119 /*
1120  * Fault in the specified (object,offset), dirty the returned page as
1121  * needed.  If the requested fault_type cannot be done NULL and an
1122  * error is returned.
1123  *
1124  * A held (but not busied) page is returned.
1125  *
1126  * The passed in object must be held as specified by the shared
1127  * argument.
1128  */
1129 vm_page_t
1130 vm_fault_object_page(vm_object_t object, vm_ooffset_t offset,
1131                      vm_prot_t fault_type, int fault_flags,
1132                      int *sharedp, int *errorp)
1133 {
1134         int result;
1135         vm_pindex_t first_pindex;
1136         struct faultstate fs;
1137         struct vm_map_entry entry;
1138
1139         ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
1140         bzero(&entry, sizeof(entry));
1141         entry.object.vm_object = object;
1142         entry.maptype = VM_MAPTYPE_NORMAL;
1143         entry.protection = entry.max_protection = fault_type;
1144
1145         fs.hardfault = 0;
1146         fs.fault_flags = fault_flags;
1147         fs.map = NULL;
1148         fs.shared = vm_shared_fault;
1149         fs.first_shared = *sharedp;
1150         fs.vp = NULL;
1151         KKASSERT((fault_flags & VM_FAULT_WIRE_MASK) == 0);
1152
1153         /*
1154          * VM_FAULT_UNSWAP - swap_pager_unswapped() needs an exclusive object
1155          * VM_FAULT_DIRTY  - may require swap_pager_unswapped() later, but
1156          *                   we can try shared first.
1157          */
1158         if (fs.first_shared && (fault_flags & VM_FAULT_UNSWAP)) {
1159                 fs.first_shared = 0;
1160                 vm_object_upgrade(object);
1161         }
1162
1163         /*
1164          * Retry loop as needed (typically for shared->exclusive transitions)
1165          */
1166 RetryFault:
1167         *sharedp = fs.first_shared;
1168         first_pindex = OFF_TO_IDX(offset);
1169         fs.first_object = object;
1170         fs.entry = &entry;
1171         fs.first_prot = fault_type;
1172         fs.wired = 0;
1173         /*fs.map_generation = 0; unused */
1174
1175         /*
1176          * Make a reference to this object to prevent its disposal while we
1177          * are messing with it.  Once we have the reference, the map is free
1178          * to be diddled.  Since objects reference their shadows (and copies),
1179          * they will stay around as well.
1180          *
1181          * The reference should also prevent an unexpected collapse of the
1182          * parent that might move pages from the current object into the
1183          * parent unexpectedly, resulting in corruption.
1184          *
1185          * Bump the paging-in-progress count to prevent size changes (e.g.
1186          * truncation operations) during I/O.  This must be done after
1187          * obtaining the vnode lock in order to avoid possible deadlocks.
1188          */
1189         if (fs.vp == NULL)
1190                 fs.vp = vnode_pager_lock(fs.first_object);
1191
1192         fs.lookup_still_valid = TRUE;
1193         fs.first_m = NULL;
1194         fs.object = fs.first_object;    /* so unlock_and_deallocate works */
1195
1196 #if 0
1197         /* XXX future - ability to operate on VM object using vpagetable */
1198         if (fs.entry->maptype == VM_MAPTYPE_VPAGETABLE) {
1199                 result = vm_fault_vpagetable(&fs, &first_pindex,
1200                                              fs.entry->aux.master_pde,
1201                                              fault_type, 0);
1202                 if (result == KERN_TRY_AGAIN) {
1203                         if (fs.first_shared == 0 && *sharedp)
1204                                 vm_object_upgrade(object);
1205                         goto RetryFault;
1206                 }
1207                 if (result != KERN_SUCCESS) {
1208                         *errorp = result;
1209                         return (NULL);
1210                 }
1211         }
1212 #endif
1213
1214         /*
1215          * Now we have the actual (object, pindex), fault in the page.  If
1216          * vm_fault_object() fails it will unlock and deallocate the FS
1217          * data.   If it succeeds everything remains locked and fs->object
1218          * will have an additional PIP count if it is not equal to
1219          * fs->first_object
1220          *
1221          * On KERN_TRY_AGAIN vm_fault_object() leaves fs.first_object intact.
1222          * We may have to upgrade its lock to handle the requested fault.
1223          */
1224         result = vm_fault_object(&fs, first_pindex, fault_type, 0);
1225
1226         if (result == KERN_TRY_AGAIN) {
1227                 if (fs.first_shared == 0 && *sharedp)
1228                         vm_object_upgrade(object);
1229                 goto RetryFault;
1230         }
1231         if (result != KERN_SUCCESS) {
1232                 *errorp = result;
1233                 return(NULL);
1234         }
1235
1236         if ((fault_type & VM_PROT_WRITE) && (fs.prot & VM_PROT_WRITE) == 0) {
1237                 *errorp = KERN_PROTECTION_FAILURE;
1238                 unlock_and_deallocate(&fs);
1239                 return(NULL);
1240         }
1241
1242         /*
1243          * On success vm_fault_object() does not unlock or deallocate, so we
1244          * do it here.  Note that the returned fs.m will be busied.
1245          */
1246         unlock_things(&fs);
1247
1248         /*
1249          * Return a held page.  We are not doing any pmap manipulation so do
1250          * not set PG_MAPPED.  However, adjust the page flags according to
1251          * the fault type because the caller may not use a managed pmapping
1252          * (so we don't want to lose the fact that the page will be dirtied
1253          * if a write fault was specified).
1254          */
1255         vm_page_hold(fs.m);
1256         vm_page_activate(fs.m);
1257         if ((fault_type & VM_PROT_WRITE) || (fault_flags & VM_FAULT_DIRTY))
1258                 vm_page_dirty(fs.m);
1259         if (fault_flags & VM_FAULT_UNSWAP)
1260                 swap_pager_unswapped(fs.m);
1261
1262         /*
1263          * Indicate that the page was accessed.
1264          */
1265         vm_page_flag_set(fs.m, PG_REFERENCED);
1266
1267         if (curthread->td_lwp) {
1268                 if (fs.hardfault) {
1269                         curthread->td_lwp->lwp_ru.ru_majflt++;
1270                 } else {
1271                         curthread->td_lwp->lwp_ru.ru_minflt++;
1272                 }
1273         }
1274
1275         /*
1276          * Unlock everything, and return the held page.
1277          */
1278         vm_page_wakeup(fs.m);
1279         /*vm_object_deallocate(fs.first_object);*/
1280         /*fs.first_object = NULL; */
1281
1282         *errorp = 0;
1283         return(fs.m);
1284 }
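/*
 * Illustrative sketch (not compiled): faulting a page directly from a
 * VM object the caller already holds, using vm_fault_object_page().
 * The helper name is an assumption for this example.  On success the
 * returned page is held (not busied) and must eventually be released
 * with vm_page_unhold().
 */
#if 0
static vm_page_t
example_get_object_page(vm_object_t object, vm_ooffset_t offset, int *errorp)
{
        vm_page_t m;
        int shared = 1;         /* start with a shared object hold */

        vm_object_hold_shared(object);
        m = vm_fault_object_page(object, offset, VM_PROT_READ,
                                 VM_FAULT_NORMAL, &shared, errorp);
        vm_object_drop(object);
        return (m);
}
#endif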
1285
1286 /*
1287  * Translate the virtual page number (first_pindex) that is relative
1288  * to the address space into a logical page number that is relative to the
1289  * backing object.  Use the virtual page table pointed to by (vpte).
1290  *
1291  * Possibly downgrade the protection based on the vpte bits.
1292  *
1293  * This implements an N-level page table.  Any level can terminate the
1294  * scan by setting VPTE_PS.   A linear mapping is accomplished by setting
1295  * VPTE_PS in the master page directory entry set via mcontrol(MADV_SETMAP).
1296  */
1297 static
1298 int
1299 vm_fault_vpagetable(struct faultstate *fs, vm_pindex_t *pindex,
1300                     vpte_t vpte, int fault_type, int allow_nofault)
1301 {
1302         struct lwbuf *lwb;
1303         struct lwbuf lwb_cache;
1304         int vshift = VPTE_FRAME_END - PAGE_SHIFT; /* index bits remaining */
1305         int result;
1306         vpte_t *ptep;
1307
1308         ASSERT_LWKT_TOKEN_HELD(vm_object_token(fs->first_object));
1309         for (;;) {
1310                 /*
1311                  * We cannot proceed if the vpte is not valid, not readable
1312                  * for a read fault, not writable for a write fault, or
1313                  * not executable for an instruction execution fault.
1314                  */
1315                 if ((vpte & VPTE_V) == 0) {
1316                         unlock_and_deallocate(fs);
1317                         return (KERN_FAILURE);
1318                 }
1319                 if ((fault_type & VM_PROT_WRITE) && (vpte & VPTE_RW) == 0) {
1320                         unlock_and_deallocate(fs);
1321                         return (KERN_FAILURE);
1322                 }
1323                 if ((fault_type & VM_PROT_EXECUTE) && (vpte & VPTE_NX)) {
1324                         unlock_and_deallocate(fs);
1325                         return (KERN_FAILURE);
1326                 }
1327                 if ((vpte & VPTE_PS) || vshift == 0)
1328                         break;
1329
1330                 /*
1331                  * Get the page table page.  Nominally we only read the page
1332                  * table, but since we are actively setting VPTE_M and VPTE_A,
1333                  * tell vm_fault_object() that we are writing it. 
1334                  *
1335                  * There is currently no real need to optimize this.
1336                  */
1337                 result = vm_fault_object(fs, (vpte & VPTE_FRAME) >> PAGE_SHIFT,
1338                                          VM_PROT_READ|VM_PROT_WRITE,
1339                                          allow_nofault);
1340                 if (result != KERN_SUCCESS)
1341                         return (result);
1342
1343                 /*
1344                  * Process the returned fs.m and look up the page table
1345                  * entry in the page table page.
1346                  */
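                /*
                 * Each level consumes VPTE_PAGE_BITS of index bits, so
                 * after vshift is reduced below, the expression
                 * (*pindex >> vshift) & VPTE_PAGE_MASK selects this
                 * level's entry within the page table page.
                 */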
1347                 vshift -= VPTE_PAGE_BITS;
1348                 lwb = lwbuf_alloc(fs->m, &lwb_cache);
1349                 ptep = ((vpte_t *)lwbuf_kva(lwb) +
1350                         ((*pindex >> vshift) & VPTE_PAGE_MASK));
1351                 vm_page_activate(fs->m);
1352
1353                 /*
1354                  * Page table write-back - entire operation including
1355                  * validation of the pte must be atomic to avoid races
1356                  * against the vkernel changing the pte.
1357                  *
1358                  * If the vpte is valid for the requested operation, do
1359                  * a write-back to the page table.
1360                  *
1361                  * XXX VPTE_M is not set properly for page directory pages.
1362                  * It doesn't get set in the page directory if the page table
1363                  * is modified during a read access.
1364                  */
1365                 for (;;) {
1366                         vpte_t nvpte;
1367
1368                         /*
1369                          * Reload for the cmpset, but make sure the pte is
1370                          * still valid.
1371                          */
1372                         vpte = *ptep;
1373                         cpu_ccfence();
1374                         nvpte = vpte;
1375
1376                         if ((vpte & VPTE_V) == 0)
1377                                 break;
1378
1379                         if ((fault_type & VM_PROT_WRITE) && (vpte & VPTE_RW))
1380                                 nvpte |= VPTE_M | VPTE_A;
1381                         if (fault_type & (VM_PROT_READ | VM_PROT_EXECUTE))
1382                                 nvpte |= VPTE_A;
1383                         if (vpte == nvpte)
1384                                 break;
1385                         if (atomic_cmpset_long(ptep, vpte, nvpte)) {
1386                                 vm_page_dirty(fs->m);
1387                                 break;
1388                         }
1389                 }
1390                 lwbuf_free(lwb);
1391                 vm_page_flag_set(fs->m, PG_REFERENCED);
1392                 vm_page_wakeup(fs->m);
1393                 fs->m = NULL;
1394                 cleanup_successful_fault(fs);
1395         }
1396
1397         /*
1398          * When the vkernel sets VPTE_RW it expects the real kernel to
1399          * reflect VPTE_M back when the page is modified via the mapping.
1400          * In order to accomplish this the real kernel must map the page
1401          * read-only for read faults and use write faults to reflect VPTE_M
1402          * back.
1403          *
1404          * Once VPTE_M has been set, the real kernel's pte allows writing.
1405          * If the vkernel clears VPTE_M the vkernel must be sure to
1406          * MADV_INVAL the real kernel's mappings to force the real kernel
1407                  * to re-fault on the next write so it can set VPTE_M again.
1408          */
1409         if ((fault_type & VM_PROT_WRITE) == 0 &&
1410             (vpte & (VPTE_RW | VPTE_M)) != (VPTE_RW | VPTE_M)) {
1411                 fs->first_prot &= ~VM_PROT_WRITE;
1412         }
1413
1414         /*
1415          * Disable EXECUTE perms if NX bit is set.
1416          */
1417         if (vpte & VPTE_NX)
1418                 fs->first_prot &= ~VM_PROT_EXECUTE;
1419
1420         /*
1421          * Combine remaining address bits with the vpte.
1422          */
1423         *pindex = ((vpte & VPTE_FRAME) >> PAGE_SHIFT) +
1424                   (*pindex & ((1L << vshift) - 1));
1425         return (KERN_SUCCESS);
1426 }
1427
1428
1429 /*
1430  * This is the core of the vm_fault code.
1431  *
1432  * Do all operations required to fault-in (fs.first_object, pindex).  Run
1433  * through the shadow chain as necessary and do required COW or virtual
1434  * copy operations.  The caller has already fully resolved the vm_map_entry
1435  * and, if appropriate, has created a copy-on-write layer.  All we need to
1436  * do is iterate the object chain.
1437  *
1438  * On failure (fs) is unlocked and deallocated and the caller may return or
1439  * retry depending on the failure code.  On success (fs) is NOT unlocked or
1440  * deallocated, fs.m will contain a resolved, busied page, and fs.object
1441  * will have an additional PIP count if it is not equal to fs.first_object.
1442  *
1443  * If locks based on fs->first_shared or fs->shared are insufficient,
1444  * clear the appropriate field(s) and return RETRY.  COWs require that
1445  * first_shared be 0, while page allocations (or frees) require that
1446  * shared be 0.  Renames require that both be 0.
1447  *
1448  * NOTE! fs->[first_]shared might be set with VM_FAULT_DIRTY also set.
1449  *       We will have to retry with it exclusive if the vm_page is
1450  *       PG_SWAPPED.
1451  *
1452  * fs->first_object must be held on call.
1453  */
1454 static
1455 int
1456 vm_fault_object(struct faultstate *fs, vm_pindex_t first_pindex,
1457                 vm_prot_t fault_type, int allow_nofault)
1458 {
1459         vm_object_t next_object;
1460         vm_pindex_t pindex;
1461         int error;
1462
1463         ASSERT_LWKT_TOKEN_HELD(vm_object_token(fs->first_object));
1464         fs->prot = fs->first_prot;
1465         fs->object = fs->first_object;
1466         pindex = first_pindex;
1467
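        /*
         * The chain lock stabilizes the backing-object chain while we walk
         * it and the paging-in-progress (PIP) count keeps the object from
         * being terminated out from under us; both are released on every
         * exit path below.
         */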
1468         vm_object_chain_acquire(fs->first_object, fs->shared);
1469         vm_object_pip_add(fs->first_object, 1);
1470
1471         /* 
1472          * If a read fault occurs we try to upgrade the page protection
1473          * and make it also writable if possible.  There are four cases
1474          * where we cannot make the page mapping writable:
1475          *
1476          * (1) The mapping is read-only or the VM object is read-only,
1477          *     fs->prot above will simply not have VM_PROT_WRITE set.
1478          *
1479          * (2) If the mapping is a virtual page table fs->first_prot will
1480          *     have already been properly adjusted by vm_fault_vpagetable()
1481          *     to detect writes so we can set VPTE_M in the virtual page
1482          *     table.  Used by vkernels.
1483          *
1484          * (3) If the VM page is read-only or copy-on-write, upgrading would
1485          *     just result in an unnecessary COW fault.
1486          *
1487          * (4) If the pmap specifically requests A/M bit emulation, downgrade
1488          *     here.
1489          */
1490 #if 0
1491         /* see vpagetable code */
1492         if (fs->entry->maptype == VM_MAPTYPE_VPAGETABLE) {
1493                 if ((fault_type & VM_PROT_WRITE) == 0)
1494                         fs->prot &= ~VM_PROT_WRITE;
1495         }
1496 #endif
1497
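        /*
         * Case (4) above: if the pmap emulates the A/M bits, read faults
         * must map the page read-only so that the first write re-faults
         * and the emulated modified bit can be recorded.
         */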
1498         if (curthread->td_lwp && curthread->td_lwp->lwp_vmspace &&
1499             pmap_emulate_ad_bits(&curthread->td_lwp->lwp_vmspace->vm_pmap)) {
1500                 if ((fault_type & VM_PROT_WRITE) == 0)
1501                         fs->prot &= ~VM_PROT_WRITE;
1502         }
1503
1504         /* vm_object_hold(fs->object); implied b/c object == first_object */
1505
1506         for (;;) {
1507                 /*
1508                  * The entire backing chain from first_object to object
1509                  * inclusive is chainlocked.
1510                  *
1511                  * If the object is dead, we stop here
1512                  */
1513                 if (fs->object->flags & OBJ_DEAD) {
1514                         vm_object_pip_wakeup(fs->first_object);
1515                         vm_object_chain_release_all(fs->first_object,
1516                                                     fs->object);
1517                         if (fs->object != fs->first_object)
1518                                 vm_object_drop(fs->object);
1519                         unlock_and_deallocate(fs);
1520                         return (KERN_PROTECTION_FAILURE);
1521                 }
1522
1523                 /*
1524                  * See if the page is resident.  Wait/Retry if the page is
1525                  * busy (lots of stuff may have changed so we can't continue
1526                  * in that case).
1527                  *
1528                  * We can theoretically allow the soft-busy case on a read
1529                  * fault if the page is marked valid, but since such
1530                  * pages are typically already pmap'd, putting that
1531                  * special case in might be more effort than it is
1532                  * worth.  We cannot under any circumstances mess
1533                  * around with a vm_page_t->busy page except, perhaps,
1534                  * to pmap it.
1535                  */
1536                 fs->m = vm_page_lookup_busy_try(fs->object, pindex,
1537                                                 TRUE, &error);
1538                 if (error) {
1539                         vm_object_pip_wakeup(fs->first_object);
1540                         vm_object_chain_release_all(fs->first_object,
1541                                                     fs->object);
1542                         if (fs->object != fs->first_object)
1543                                 vm_object_drop(fs->object);
1544                         unlock_things(fs);
1545                         vm_page_sleep_busy(fs->m, TRUE, "vmpfw");
1546                         mycpu->gd_cnt.v_intrans++;
1547                         /*vm_object_deallocate(fs->first_object);*/
1548                         /*fs->first_object = NULL;*/
1549                         fs->m = NULL;
1550                         return (KERN_TRY_AGAIN);
1551                 }
1552                 if (fs->m) {
1553                         /*
1554                          * The page is busied for us.
1555                          *
1556                          * If reactivating a page from PQ_CACHE we may have
1557                          * to rate-limit.
1558                          */
1559                         int queue = fs->m->queue;
1560                         vm_page_unqueue_nowakeup(fs->m);
1561
1562                         if ((queue - fs->m->pc) == PQ_CACHE && 
1563                             vm_page_count_severe()) {
1564                                 vm_page_activate(fs->m);
1565                                 vm_page_wakeup(fs->m);
1566                                 fs->m = NULL;
1567                                 vm_object_pip_wakeup(fs->first_object);
1568                                 vm_object_chain_release_all(fs->first_object,
1569                                                             fs->object);
1570                                 if (fs->object != fs->first_object)
1571                                         vm_object_drop(fs->object);
1572                                 unlock_and_deallocate(fs);
1573                                 if (allow_nofault == 0 ||
1574                                     (curthread->td_flags & TDF_NOFAULT) == 0) {
1575                                         thread_t td;
1576
1577                                         vm_wait_pfault();
1578                                         td = curthread;
1579                                         if (td->td_proc && (td->td_proc->p_flags & P_LOWMEMKILL))
1580                                                 return (KERN_PROTECTION_FAILURE);
1581                                 }
1582                                 return (KERN_TRY_AGAIN);
1583                         }
1584
1585                         /*
1586                          * If it still isn't completely valid (readable),
1587                          * or if a read-ahead-mark is set on the VM page,
1588                          * jump to readrest, else we found the page and
1589                          * can return.
1590                          *
1591                          * We can release the spl once we have marked the
1592                          * page busy.
1593                          */
1594                         if (fs->m->object != &kernel_object) {
1595                                 if ((fs->m->valid & VM_PAGE_BITS_ALL) !=
1596                                     VM_PAGE_BITS_ALL) {
1597                                         goto readrest;
1598                                 }
1599                                 if (fs->m->flags & PG_RAM) {
1600                                         if (debug_cluster)
1601                                                 kprintf("R");
1602                                         vm_page_flag_clear(fs->m, PG_RAM);
1603                                         goto readrest;
1604                                 }
1605                         }
1606                         break; /* break to PAGE HAS BEEN FOUND */
1607                 }
1608
1609                 /*
1610                  * Page is not resident.  If this is the search termination
1611                  * or the pager might contain the page, allocate a new page.
1612                  */
1613                 if (TRYPAGER(fs) || fs->object == fs->first_object) {
1614                         /*
1615                          * Allocating, must be exclusive.
1616                          */
1617                         if (fs->object == fs->first_object &&
1618                             fs->first_shared) {
1619                                 fs->first_shared = 0;
1620                                 vm_object_pip_wakeup(fs->first_object);
1621                                 vm_object_chain_release_all(fs->first_object,
1622                                                             fs->object);
1623                                 if (fs->object != fs->first_object)
1624                                         vm_object_drop(fs->object);
1625                                 unlock_and_deallocate(fs);
1626                                 return (KERN_TRY_AGAIN);
1627                         }
1628                         if (fs->object != fs->first_object &&
1629                             fs->shared) {
1630                                 fs->first_shared = 0;
1631                                 fs->shared = 0;
1632                                 vm_object_pip_wakeup(fs->first_object);
1633                                 vm_object_chain_release_all(fs->first_object,
1634                                                             fs->object);
1635                                 if (fs->object != fs->first_object)
1636                                         vm_object_drop(fs->object);
1637                                 unlock_and_deallocate(fs);
1638                                 return (KERN_TRY_AGAIN);
1639                         }
1640
1641                         /*
1642                          * If the page is beyond the object size we fail
1643                          */
1644                         if (pindex >= fs->object->size) {
1645                                 vm_object_pip_wakeup(fs->first_object);
1646                                 vm_object_chain_release_all(fs->first_object,
1647                                                             fs->object);
1648                                 if (fs->object != fs->first_object)
1649                                         vm_object_drop(fs->object);
1650                                 unlock_and_deallocate(fs);
1651                                 return (KERN_PROTECTION_FAILURE);
1652                         }
1653
1654                         /*
1655                          * Allocate a new page for this object/offset pair.
1656                          *
1657                          * It is possible for the allocation to race, so
1658                          * handle the case.
1659                          */
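                        /*
                         * Plain anonymous memory (no vnode and no backing
                         * object) will simply be zero-filled if nothing
                         * pages it in, so prefer a pre-zeroed page
                         * (VM_ALLOC_ZERO) in that case.
                         */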
1660                         fs->m = NULL;
1661                         if (!vm_page_count_severe()) {
1662                                 fs->m = vm_page_alloc(fs->object, pindex,
1663                                     ((fs->vp || fs->object->backing_object) ?
1664                                         VM_ALLOC_NULL_OK | VM_ALLOC_NORMAL :
1665                                         VM_ALLOC_NULL_OK | VM_ALLOC_NORMAL |
1666                                         VM_ALLOC_USE_GD | VM_ALLOC_ZERO));
1667                         }
1668                         if (fs->m == NULL) {
1669                                 vm_object_pip_wakeup(fs->first_object);
1670                                 vm_object_chain_release_all(fs->first_object,
1671                                                             fs->object);
1672                                 if (fs->object != fs->first_object)
1673                                         vm_object_drop(fs->object);
1674                                 unlock_and_deallocate(fs);
1675                                 if (allow_nofault == 0 ||
1676                                     (curthread->td_flags & TDF_NOFAULT) == 0) {
1677                                         thread_t td;
1678
1679                                         vm_wait_pfault();
1680                                         td = curthread;
1681                                         if (td->td_proc && (td->td_proc->p_flags & P_LOWMEMKILL))
1682                                                 return (KERN_PROTECTION_FAILURE);
1683                                 }
1684                                 return (KERN_TRY_AGAIN);
1685                         }
1686
1687                         /*
1688                          * Fall through to readrest.  We have a new page which
1689                          * will have to be paged in (since m->valid will be 0).
1690                          */
1691                 }
1692
1693 readrest:
1694                 /*
1695                  * We have found an invalid or partially valid page, a
1696                  * page with a read-ahead mark which might be partially or
1697                  * fully valid (and maybe dirty too), or we have allocated
1698                  * a new page.
1699                  *
1700                  * Attempt to fault-in the page if there is a chance that the
1701                  * pager has it, and potentially fault in additional pages
1702                  * at the same time.
1703                  *
1704                  * If TRYPAGER is true then fs.m will be non-NULL and busied
1705                  * for us.
1706                  */
1707                 if (TRYPAGER(fs)) {
1708                         int rv;
1709                         int seqaccess;
1710                         u_char behavior = vm_map_entry_behavior(fs->entry);
1711
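                        /*
                         * Read-ahead hint for the pager: presumably 0
                         * disables clustering for MADV_RANDOM mappings and
                         * a negative value lets the pager decide.
                         */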
1712                         if (behavior == MAP_ENTRY_BEHAV_RANDOM)
1713                                 seqaccess = 0;
1714                         else
1715                                 seqaccess = -1;
1716
1717                         /*
1718                          * Doing I/O may synchronously insert additional
1719                          * pages so we can't be shared at this point either.
1720                          *
1721                          * NOTE: We can't free fs->m here in the allocated
1722                          *       case (fs->object != fs->first_object) as
1723                          *       this would require an exclusively locked
1724                          *       VM object.
1725                          */
1726                         if (fs->object == fs->first_object &&
1727                             fs->first_shared) {
1728                                 vm_page_deactivate(fs->m);
1729                                 vm_page_wakeup(fs->m);
1730                                 fs->m = NULL;
1731                                 fs->first_shared = 0;
1732                                 vm_object_pip_wakeup(fs->first_object);
1733                                 vm_object_chain_release_all(fs->first_object,
1734                                                             fs->object);
1735                                 if (fs->object != fs->first_object)
1736                                         vm_object_drop(fs->object);
1737                                 unlock_and_deallocate(fs);
1738                                 return (KERN_TRY_AGAIN);
1739                         }
1740                         if (fs->object != fs->first_object &&
1741                             fs->shared) {
1742                                 vm_page_deactivate(fs->m);
1743                                 vm_page_wakeup(fs->m);
1744                                 fs->m = NULL;
1745                                 fs->first_shared = 0;
1746                                 fs->shared = 0;
1747                                 vm_object_pip_wakeup(fs->first_object);
1748                                 vm_object_chain_release_all(fs->first_object,
1749                                                             fs->object);
1750                                 if (fs->object != fs->first_object)
1751                                         vm_object_drop(fs->object);
1752                                 unlock_and_deallocate(fs);
1753                                 return (KERN_TRY_AGAIN);
1754                         }
1755
1756                         /*
1757                          * Avoid deadlocking against the map when doing I/O.
1758                          * fs.object is held and the page is BUSY'd.
1759                          *
1760                          * NOTE: Once unlocked, fs->entry can become stale
1761                          *       so this will NULL it out.
1762                          *
1763                          * NOTE: fs->entry is invalid until we relock the
1764                          *       map and verify that the timestamp has not
1765                          *       changed.
1766                          */
1767                         unlock_map(fs);
1768
1769                         /*
1770                          * Acquire the page data.  We still hold a ref on
1771                          * fs.object and the page has been BUSY'd.
1772                          *
1773                          * The pager may replace the page (for example, in
1774                          * order to enter a fictitious page into the
1775                          * object).  If it does so it is responsible for
1776                          * cleaning up the passed page and properly setting
1777                          * the new page BUSY.
1778                          *
1779                          * If we got here through a PG_RAM read-ahead
1780                          * mark, the page may be partially dirty and thus
1781                          * not freeable.  Don't bother checking to see
1782                          * if the pager has the page because we can't free
1783                          * it anyway.  We have to depend on the get_page
1784                          * operation filling in any gaps whether there is
1785                          * backing store or not.
1786                          */
1787                         rv = vm_pager_get_page(fs->object, &fs->m, seqaccess);
1788
1789                         if (rv == VM_PAGER_OK) {
1790                                 /*
1791                                  * Relookup in case pager changed page. Pager
1792                                  * is responsible for disposition of old page
1793                                  * if moved.
1794                                  *
1795                                  * XXX other code segments do relookups too.
1796                                  * It's a bad abstraction that needs to be
1797                                  * fixed/removed.
1798                                  */
1799                                 fs->m = vm_page_lookup(fs->object, pindex);
1800                                 if (fs->m == NULL) {
1801                                         vm_object_pip_wakeup(fs->first_object);
1802                                         vm_object_chain_release_all(
1803                                                 fs->first_object, fs->object);
1804                                         if (fs->object != fs->first_object)
1805                                                 vm_object_drop(fs->object);
1806                                         unlock_and_deallocate(fs);
1807                                         return (KERN_TRY_AGAIN);
1808                                 }
1809                                 ++fs->hardfault;
1810                                 break; /* break to PAGE HAS BEEN FOUND */
1811                         }
1812
1813                         /*
1814                          * Remove the bogus page (which does not exist at this
1815                          * object/offset); before doing so, we must get back
1816                          * our object lock to preserve our invariant.
1817                          *
1818                          * Also wake up any other process that may want to bring
1819                          * in this page.
1820                          *
1821                          * If this is the top-level object, we must leave the
1822                          * busy page to prevent another process from rushing
1823                          * past us, and inserting the page in that object at
1824                          * the same time that we are.
1825                          */
1826                         if (rv == VM_PAGER_ERROR) {
1827                                 if (curproc) {
1828                                         kprintf("vm_fault: pager read error, "
1829                                                 "pid %d (%s)\n",
1830                                                 curproc->p_pid,
1831                                                 curproc->p_comm);
1832                                 } else {
1833                                         kprintf("vm_fault: pager read error, "
1834                                                 "thread %p (%s)\n",
1835                                                 curthread,
1836                                         curthread->td_comm);
1837                                 }
1838                         }
1839
1840                         /*
1841                          * Data outside the range of the pager or an I/O error
1842                          *
1843                          * The page may have been wired during the pagein,
1844                          * e.g. by the buffer cache, and cannot simply be
1845                          * freed.  Call vnode_pager_freepage() to deal with it.
1846                          *
1847                          * Also note that we cannot free the page if we are
1848                          * holding the related object shared. XXX not sure
1849                          * what to do in that case.
1850                          */
1851                         if (fs->object != fs->first_object) {
1852                                 /*
1853                                  * Scrap the page.  Check to see if the
1854                                  * vm_pager_get_page() call has already
1855                                  * dealt with it.
1856                                  */
1857                                 if (fs->m) {
1858                                         vnode_pager_freepage(fs->m);
1859                                         fs->m = NULL;
1860                                 }
1861
1862                                 /*
1863                                  * XXX - we cannot just fall out at this
1864                                  * point, m has been freed and is invalid!
1865                                  */
1866                         }
1867                         /*
1868                          * XXX - the check for kernel_map is a kludge to work
1869                          * around having the machine panic on a kernel space
1870                          * fault w/ I/O error.
1871                          */
1872                         if (((fs->map != &kernel_map) &&
1873                             (rv == VM_PAGER_ERROR)) || (rv == VM_PAGER_BAD)) {
1874                                 if (fs->m) {
1875                                         if (fs->first_shared) {
1876                                                 vm_page_deactivate(fs->m);
1877                                                 vm_page_wakeup(fs->m);
1878                                         } else {
1879                                                 vnode_pager_freepage(fs->m);
1880                                         }
1881                                         fs->m = NULL;
1882                                 }
1883                                 vm_object_pip_wakeup(fs->first_object);
1884                                 vm_object_chain_release_all(fs->first_object,
1885                                                             fs->object);
1886                                 if (fs->object != fs->first_object)
1887                                         vm_object_drop(fs->object);
1888                                 unlock_and_deallocate(fs);
1889                                 if (rv == VM_PAGER_ERROR)
1890                                         return (KERN_FAILURE);
1891                                 else
1892                                         return (KERN_PROTECTION_FAILURE);
1893                                 /* NOT REACHED */
1894                         }
1895                 }
1896
1897                 /*
1898                  * We get here if the object has a default pager (or unwiring) 
1899                  * or the pager doesn't have the page.
1900                  *
1901                  * fs->first_m will be used for the COW unless we find a
1902                  * deeper page to be mapped read-only, in which case the
1903                  * unlock*(fs) will free first_m.
1904                  */
1905                 if (fs->object == fs->first_object)
1906                         fs->first_m = fs->m;
1907
1908                 /*
1909                  * Move on to the next object.  The chain lock should prevent
1910                  * the backing_object from getting ripped out from under us.
1911                  *
1912                  * The object lock for the next object is governed by
1913                  * fs->shared.
1914                  */
1915                 if ((next_object = fs->object->backing_object) != NULL) {
1916                         if (fs->shared)
1917                                 vm_object_hold_shared(next_object);
1918                         else
1919                                 vm_object_hold(next_object);
1920                         vm_object_chain_acquire(next_object, fs->shared);
1921                         KKASSERT(next_object == fs->object->backing_object);
1922                         pindex += OFF_TO_IDX(fs->object->backing_object_offset);
1923                 }
1924
1925                 if (next_object == NULL) {
1926                         /*
1927                          * If there's no object left, fill the page in the top
1928                          * object with zeros.
1929                          */
1930                         if (fs->object != fs->first_object) {
1931 #if 0
1932                                 if (fs->first_object->backing_object !=
1933                                     fs->object) {
1934                                         vm_object_hold(fs->first_object->backing_object);
1935                                 }
1936 #endif
1937                                 vm_object_chain_release_all(
1938                                         fs->first_object->backing_object,
1939                                         fs->object);
1940 #if 0
1941                                 if (fs->first_object->backing_object !=
1942                                     fs->object) {
1943                                         vm_object_drop(fs->first_object->backing_object);
1944                                 }
1945 #endif
1946                                 vm_object_pip_wakeup(fs->object);
1947                                 vm_object_drop(fs->object);
1948                                 fs->object = fs->first_object;
1949                                 pindex = first_pindex;
1950                                 fs->m = fs->first_m;
1951                         }
1952                         fs->first_m = NULL;
1953
1954                         /*
1955                          * Zero the page and mark it valid.
1956                          */
1957                         vm_page_zero_fill(fs->m);
1958                         mycpu->gd_cnt.v_zfod++;
1959                         fs->m->valid = VM_PAGE_BITS_ALL;
1960                         break;  /* break to PAGE HAS BEEN FOUND */
1961                 }
1962                 if (fs->object != fs->first_object) {
1963                         vm_object_pip_wakeup(fs->object);
1964                         vm_object_lock_swap();
1965                         vm_object_drop(fs->object);
1966                 }
1967                 KASSERT(fs->object != next_object,
1968                         ("object loop %p", next_object));
1969                 fs->object = next_object;
1970                 vm_object_pip_add(fs->object, 1);
1971         }
1972
1973         /*
1974          * PAGE HAS BEEN FOUND. [Loop invariant still holds -- the object lock
1975          * is held.]
1976          *
1977          * object still held.
1978          *
1979          * local shared variable may be different from fs->shared.
1980          *
1981          * If the page is being written, but isn't already owned by the
1982          * top-level object, we have to copy it into a new page owned by the
1983          * top-level object.
1984          */
1985         KASSERT((fs->m->busy_count & PBUSY_LOCKED) != 0,
1986                 ("vm_fault: not busy after main loop"));
1987
1988         if (fs->object != fs->first_object) {
1989                 /*
1990                  * We only really need to copy if we want to write it.
1991                  */
1992                 if (fault_type & VM_PROT_WRITE) {
1993                         /*
1994                          * This allows pages to be virtually copied from a 
1995                          * backing_object into the first_object, where the 
1996                          * backing object has no other refs to it, and cannot
1997                          * gain any more refs.  Instead of a bcopy, we just 
1998                          * move the page from the backing object to the 
1999                          * first object.  Note that we must mark the page 
2000                          * dirty in the first object so that it will go out 
2001                          * to swap when needed.
2002                          */
2003                         if (
2004                                 /*
2005                                  * Must be holding exclusive locks
2006                                  */
2007                                 fs->first_shared == 0 &&
2008                                 fs->shared == 0 &&
2009                                 /*
2010                                  * Map, if present, has not changed
2011                                  */
2012                                 (fs->map == NULL ||
2013                                 fs->map_generation == fs->map->timestamp) &&
2014                                 /*
2015                                  * Only one shadow object
2016                                  */
2017                                 (fs->object->shadow_count == 1) &&
2018                                 /*
2019                                  * No COW refs, except us
2020                                  */
2021                                 (fs->object->ref_count == 1) &&
2022                                 /*
2023                                  * No one else can look this object up
2024                                  */
2025                                 (fs->object->handle == NULL) &&
2026                                 /*
2027                                  * No other ways to look the object up
2028                                  */
2029                                 ((fs->object->type == OBJT_DEFAULT) ||
2030                                  (fs->object->type == OBJT_SWAP)) &&
2031                                 /*
2032                                  * We don't chase down the shadow chain
2033                                  */
2034                                 (fs->object == fs->first_object->backing_object) &&
2035
2036                                 /*
2037                                  * grab the lock if we need to
2038                                  */
2039                                 (fs->lookup_still_valid ||
2040                                  fs->map == NULL ||
2041                                  lockmgr(&fs->map->lock, LK_EXCLUSIVE|LK_NOWAIT) == 0)
2042                             ) {
2043                                 /*
2044                                  * (first_m) and (m) are both busied.  We have to
2045                                  * move (m) into (first_m)'s object/pindex
2046                                  * in an atomic fashion, then free (first_m).
2047                                  *
2048                                  * first_object is held so second remove
2049                                  * followed by the rename should wind
2050                                  * up being atomic.  vm_page_free() might
2051                                  * block so we don't do it until after the
2052                                  * rename.
2053                                  */
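                                /*
                                 * The lookup is valid again: either it
                                 * already was, or the lockmgr() call in
                                 * the conditional above just reacquired
                                 * the map lock.
                                 */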
2054                                 fs->lookup_still_valid = 1;
2055                                 vm_page_protect(fs->first_m, VM_PROT_NONE);
2056                                 vm_page_remove(fs->first_m);
2057                                 vm_page_rename(fs->m, fs->first_object,
2058                                                first_pindex);
2059                                 vm_page_free(fs->first_m);
2060                                 fs->first_m = fs->m;
2061                                 fs->m = NULL;
2062                                 mycpu->gd_cnt.v_cow_optim++;
2063                         } else {
2064                                 /*
2065                                  * Oh, well, let's copy it.
2066                                  *
2067                                  * Why are we unmapping the original page
2068                                  * here?  Well, in short, not all accessors
2069                                  * of user memory go through the pmap.  The
2070                                  * procfs code doesn't have access to user memory
2071                                  * via a local pmap, so vm_fault_page*()
2072                                  * can't call pmap_enter().  And the umtx*()
2073                                  * code may modify the COW'd page via a DMAP
2074                                  * or kernel mapping and not via the pmap,
2075                                  * leaving the original page still mapped
2076                                  * read-only into the pmap.
2077                                  *
2078                                  * So we have to remove the page from at
2079                                  * least the current pmap if it is in it.
2080                                  *
2081                                  * We used to just remove it from all pmaps
2082                                  * but that creates inefficiencies on SMP,
2083                                  * particularly for COW program & library
2084                                  * mappings that are concurrently exec'd.
2085                                  * Only remove the page from the current
2086                                  * pmap.
2087                                  */
2088                                 KKASSERT(fs->first_shared == 0);
2089                                 vm_page_copy(fs->m, fs->first_m);
2090                                 /*vm_page_protect(fs->m, VM_PROT_NONE);*/
2091                                 pmap_remove_specific(
2092                                     &curthread->td_lwp->lwp_vmspace->vm_pmap,
2093                                     fs->m);
2094                         }
2095
2096                         /*
2097                          * We no longer need the old page or object.
2098                          */
2099                         if (fs->m)
2100                                 release_page(fs);
2101
2102                         /*
2103                          * We intend to revert to first_object, undo the
2104                          * chain lock through to that.
2105                          */
2106 #if 0
2107                         if (fs->first_object->backing_object != fs->object)
2108                                 vm_object_hold(fs->first_object->backing_object);
2109 #endif
2110                         vm_object_chain_release_all(
2111                                         fs->first_object->backing_object,
2112                                         fs->object);
2113 #if 0
2114                         if (fs->first_object->backing_object != fs->object)
2115                                 vm_object_drop(fs->first_object->backing_object);
2116 #endif
2117
2118                         /*
2119                          * fs->object != fs->first_object due to above 
2120                          * conditional
2121                          */
2122                         vm_object_pip_wakeup(fs->object);
2123                         vm_object_drop(fs->object);
2124
2125                         /*
2126                          * Only use the new page below...
2127                          */
2128                         mycpu->gd_cnt.v_cow_faults++;
2129                         fs->m = fs->first_m;
2130                         fs->object = fs->first_object;
2131                         pindex = first_pindex;
2132                 } else {
2133                         /*
2134                          * If it wasn't a write fault avoid having to copy
2135                          * the page by mapping it read-only.
2136                          */
2137                         fs->prot &= ~VM_PROT_WRITE;
2138                 }
2139         }
2140
2141         /*
2142          * Relock the map if necessary, then check the generation count.
2143          * relock_map() will update fs->timestamp to account for the
2144          * relocking if necessary.
2145          *
2146          * If the count has changed after relocking then all sorts of
2147          * crap may have happened and we have to retry.
2148          *
2149          * NOTE: The relock_map() can fail due to a deadlock against
2150          *       the vm_page we are holding BUSY.
2151          */
2152         if (fs->lookup_still_valid == FALSE && fs->map) {
2153                 if (relock_map(fs) ||
2154                     fs->map->timestamp != fs->map_generation) {
2155                         release_page(fs);
2156                         vm_object_pip_wakeup(fs->first_object);
2157                         vm_object_chain_release_all(fs->first_object,
2158                                                     fs->object);
2159                         if (fs->object != fs->first_object)
2160                                 vm_object_drop(fs->object);
2161                         unlock_and_deallocate(fs);
2162                         return (KERN_TRY_AGAIN);
2163                 }
2164         }
2165
2166         /*
2167          * If the fault is a write, we know that this page is being
2168          * written NOW so dirty it explicitly to save on pmap_is_modified()
2169          * calls later.
2170          *
2171          * If this is a NOSYNC mmap we do not want to set PG_NOSYNC
2172          * if the page is already dirty to prevent data written with
2173          * the expectation of being synced from not being synced.
2174          * Likewise if this entry does not request NOSYNC then make
2175          * sure the page isn't marked NOSYNC.  Applications sharing
2176          * data should use the same flags to avoid ping ponging.
2177          *
2178          * Also tell the backing pager, if any, that it should remove
2179          * any swap backing since the page is now dirty.
2180          */
2181         vm_page_activate(fs->m);
2182         if (fs->prot & VM_PROT_WRITE) {
2183                 vm_object_set_writeable_dirty(fs->m->object);
2184                 vm_set_nosync(fs->m, fs->entry);
2185                 if (fs->fault_flags & VM_FAULT_DIRTY) {
2186                         vm_page_dirty(fs->m);
2187                         if (fs->m->flags & PG_SWAPPED) {
2188                                 /*
2189                                  * If the page is swapped out we have to call
2190                                  * swap_pager_unswapped() which requires an
2191                                  * exclusive object lock.  If we are shared,
2192                                  * we must clear the shared flag and retry.
2193                                  */
2194                                 if ((fs->object == fs->first_object &&
2195                                      fs->first_shared) ||
2196                                     (fs->object != fs->first_object &&
2197                                      fs->shared)) {
2198                                         vm_page_wakeup(fs->m);
2199                                         fs->m = NULL;
2200                                         if (fs->object == fs->first_object)
2201                                                 fs->first_shared = 0;
2202                                         else
2203                                                 fs->shared = 0;
2204                                         vm_object_pip_wakeup(fs->first_object);
2205                                         vm_object_chain_release_all(
2206                                                 fs->first_object, fs->object);
2207                                         if (fs->object != fs->first_object)
2208                                                 vm_object_drop(fs->object);
2209                                         unlock_and_deallocate(fs);
2210                                         return (KERN_TRY_AGAIN);
2211                                 }
2212                                 swap_pager_unswapped(fs->m);
2213                         }
2214                 }
2215         }
2216
2217         vm_object_pip_wakeup(fs->first_object);
2218         vm_object_chain_release_all(fs->first_object, fs->object);
2219         if (fs->object != fs->first_object)
2220                 vm_object_drop(fs->object);
2221
2222         /*
2223          * Page had better still be busy.  We are still locked up and 
2224          * fs->object will have another PIP reference if it is not equal
2225          * to fs->first_object.
2226          */
2227         KASSERT(fs->m->busy_count & PBUSY_LOCKED,
2228                 ("vm_fault: page %p not busy!", fs->m));
2229
2230         /*
2231          * Sanity check: page must be completely valid or it is not fit to
2232          * map into user space.  vm_pager_get_page() ensures this.
2233          */
2234         if (fs->m->valid != VM_PAGE_BITS_ALL) {
2235                 vm_page_zero_invalid(fs->m, TRUE);
2236                 kprintf("Warning: page %p partially invalid on fault\n", fs->m);
2237         }
2238
2239         return (KERN_SUCCESS);
2240 }
2241
2242 /*
2243  * Wire down a range of virtual addresses in a map.  The entry in question
2244  * should be marked in-transition and the map must be locked.  We must
2245  * release the map temporarily while faulting-in the page to avoid a
2246  * deadlock.  Note that the entry may be clipped while we are blocked but
2247  * will never be freed.
2248  *
2249  * No requirements.
2250  */
2251 int
2252 vm_fault_wire(vm_map_t map, vm_map_entry_t entry,
2253               boolean_t user_wire, int kmflags)
2254 {
2255         boolean_t fictitious;
2256         vm_offset_t start;
2257         vm_offset_t end;
2258         vm_offset_t va;
2259         pmap_t pmap;
2260         int rv;
2261         int wire_prot;
2262         int fault_flags;
2263         vm_page_t m;
2264
2265         if (user_wire) {
2266                 wire_prot = VM_PROT_READ;
2267                 fault_flags = VM_FAULT_USER_WIRE;
2268         } else {
2269                 wire_prot = VM_PROT_READ | VM_PROT_WRITE;
2270                 fault_flags = VM_FAULT_CHANGE_WIRING;
2271         }
2272         if (kmflags & KM_NOTLBSYNC)
2273                 wire_prot |= VM_PROT_NOSYNC;
2274
2275         pmap = vm_map_pmap(map);
2276         start = entry->start;
2277         end = entry->end;
2278
2279         switch(entry->maptype) {
2280         case VM_MAPTYPE_NORMAL:
2281         case VM_MAPTYPE_VPAGETABLE:
2282                 fictitious = entry->object.vm_object &&
2283                             ((entry->object.vm_object->type == OBJT_DEVICE) ||
2284                              (entry->object.vm_object->type == OBJT_MGTDEVICE));
2285                 break;
2286         case VM_MAPTYPE_UKSMAP:
2287                 fictitious = TRUE;
2288                 break;
2289         default:
2290                 fictitious = FALSE;
2291                 break;
2292         }
2293
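        /*
         * The first page of a MAP_ENTRY_KSTACK entry is the stack guard
         * page; leave it alone rather than wiring it.
         */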
2294         if (entry->eflags & MAP_ENTRY_KSTACK)
2295                 start += PAGE_SIZE;
2296         map->timestamp++;
2297         vm_map_unlock(map);
2298
2299         /*
2300          * We simulate a fault to get the page and enter it in the physical
2301          * map.
2302          */
2303         for (va = start; va < end; va += PAGE_SIZE) {
2304                 rv = vm_fault(map, va, wire_prot, fault_flags);
2305                 if (rv) {
2306                         while (va > start) {
2307                                 va -= PAGE_SIZE;
2308                                 m = pmap_unwire(pmap, va);
2309                                 if (m && !fictitious) {
2310                                         vm_page_busy_wait(m, FALSE, "vmwrpg");
2311                                         vm_page_unwire(m, 1);
2312                                         vm_page_wakeup(m);
2313                                 }
2314                         }
2315                         goto done;
2316                 }
2317         }
2318         rv = KERN_SUCCESS;
2319 done:
2320         vm_map_lock(map);
2321
2322         return (rv);
2323 }
2324
2325 /*
2326  * Unwire a range of virtual addresses in a map.  The map should be
2327  * locked.
2328  */
2329 void
2330 vm_fault_unwire(vm_map_t map, vm_map_entry_t entry)
2331 {
2332         boolean_t fictitious;
2333         vm_offset_t start;
2334         vm_offset_t end;
2335         vm_offset_t va;
2336         pmap_t pmap;
2337         vm_page_t m;
2338
2339         pmap = vm_map_pmap(map);
2340         start = entry->start;
2341         end = entry->end;
2342         fictitious = entry->object.vm_object &&
2343                         ((entry->object.vm_object->type == OBJT_DEVICE) ||
2344                          (entry->object.vm_object->type == OBJT_MGTDEVICE));
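        /*
         * Skip the first page of MAP_ENTRY_KSTACK entries, matching
         * vm_fault_wire(), which never wired it.
         */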
2345         if (entry->eflags & MAP_ENTRY_KSTACK)
2346                 start += PAGE_SIZE;
2347
2348         /*
2349          * Since the pages are wired down, we must be able to get their
2350          * mappings from the physical map system.
2351          */
2352         for (va = start; va < end; va += PAGE_SIZE) {
2353                 m = pmap_unwire(pmap, va);
2354                 if (m && !fictitious) {
2355                         vm_page_busy_wait(m, FALSE, "vmwrpg");
2356                         vm_page_unwire(m, 1);
2357                         vm_page_wakeup(m);
2358                 }
2359         }
2360 }
2361
2362 /*
2363  * Copy all of the pages from a wired-down map entry to another.
2364  *
2365  * The source and destination maps must be locked for write.
2366  * The source and destination map tokens must be held.
2367  * The source map entry must be wired down (or be a sharing map
2368  * entry corresponding to a main map entry that is wired down).
2369  *
2370  * No other requirements.
2371  *
2372  * XXX do segment optimization
2373  */
2374 void
2375 vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
2376                     vm_map_entry_t dst_entry, vm_map_entry_t src_entry)
2377 {
2378         vm_object_t dst_object;
2379         vm_object_t src_object;
2380         vm_ooffset_t dst_offset;
2381         vm_ooffset_t src_offset;
2382         vm_prot_t prot;
2383         vm_offset_t vaddr;
2384         vm_page_t dst_m;
2385         vm_page_t src_m;
2386
2387         src_object = src_entry->object.vm_object;
2388         src_offset = src_entry->offset;
2389
2390         /*
2391          * Create the top-level object for the destination entry. (Doesn't
2392          * actually shadow anything - we copy the pages directly.)
2393          */
2394         vm_map_entry_allocate_object(dst_entry);
2395         dst_object = dst_entry->object.vm_object;
2396
2397         prot = dst_entry->max_protection;
2398
2399         /*
2400          * Loop through all of the pages in the entry's range, copying each
2401          * one from the source object (it should be there) to the destination
2402          * object.
2403          */
2404         vm_object_hold(src_object);
2405         vm_object_hold(dst_object);
2406         for (vaddr = dst_entry->start, dst_offset = 0;
2407              vaddr < dst_entry->end;
2408              vaddr += PAGE_SIZE, dst_offset += PAGE_SIZE) {
2409
2410                 /*
2411                  * Allocate a page in the destination object
2412                  */
2413                 do {
2414                         dst_m = vm_page_alloc(dst_object,
2415                                               OFF_TO_IDX(dst_offset),
2416                                               VM_ALLOC_NORMAL);
2417                         if (dst_m == NULL) {
2418                                 vm_wait(0);
2419                         }
2420                 } while (dst_m == NULL);
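                /*
                 * dst_m is returned busied by vm_page_alloc() and stays
                 * busied until the vm_page_wakeup() below, after the copy
                 * and pmap_enter().
                 */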
2421
2422                 /*
2423                  * Find the page in the source object, and copy it in.
2424                  * (Because the source is wired down, the page will be in
2425                  * memory.)
2426                  */
2427                 src_m = vm_page_lookup(src_object,
2428                                        OFF_TO_IDX(dst_offset + src_offset));
2429                 if (src_m == NULL)
2430                         panic("vm_fault_copy_wired: page missing");
2431
2432                 vm_page_copy(src_m, dst_m);
2433
2434                 /*
2435                  * Enter it in the pmap...
2436                  */
2437                 pmap_enter(dst_map->pmap, vaddr, dst_m, prot, FALSE, dst_entry);
2438
2439                 /*
2440                  * Mark it no longer busy, and put it on the active list.
2441                  */
2442                 vm_page_activate(dst_m);
2443                 vm_page_wakeup(dst_m);
2444         }
2445         vm_object_drop(dst_object);
2446         vm_object_drop(src_object);
2447 }
2448
2449 #if 0
2450
2451 /*
2452  * This routine checks around the requested page for other pages that
2453  * might be able to be faulted in.  This routine brackets the viable
2454  * pages for the pages to be paged in.
2455  *
2456  * Inputs:
2457  *      m, rbehind, rahead
2458  *
2459  * Outputs:
2460  *  marray (array of vm_page_t), reqpage (index of requested page)
2461  *
2462  * Return value:
2463  *  number of pages in marray
2464  */
2465 static int
2466 vm_fault_additional_pages(vm_page_t m, int rbehind, int rahead,
2467                           vm_page_t *marray, int *reqpage)
2468 {
2469         int i,j;
2470         vm_object_t object;
2471         vm_pindex_t pindex, startpindex, endpindex, tpindex;
2472         vm_page_t rtm;
2473         int cbehind, cahead;
2474
2475         object = m->object;
2476         pindex = m->pindex;
2477
2478         /*
2479          * we don't fault-ahead for device pager
2480          */
2481         if ((object->type == OBJT_DEVICE) ||
2482             (object->type == OBJT_MGTDEVICE)) {
2483                 *reqpage = 0;
2484                 marray[0] = m;
2485                 return 1;
2486         }
2487
2488         /*
2489          * if the requested page is not available, then give up now
2490          */
2491         if (!vm_pager_has_page(object, pindex, &cbehind, &cahead)) {
2492                 *reqpage = 0;   /* not used by caller, fix compiler warn */
2493                 return 0;
2494         }
2495
2496         if ((cbehind == 0) && (cahead == 0)) {
2497                 *reqpage = 0;
2498                 marray[0] = m;
2499                 return 1;
2500         }
2501
2502         if (rahead > cahead) {
2503                 rahead = cahead;
2504         }
2505
2506         if (rbehind > cbehind) {
2507                 rbehind = cbehind;
2508         }
2509
2510         /*
2511          * Do not do any readahead if we have insufficient free memory.
2512          *
2513          * XXX this code was previously broken (disabled) and showed
2514          * instability with this conditional fixed, so shortcut for now.
2515          */
2516         if (burst_fault == 0 || vm_page_count_severe()) {
2517                 marray[0] = m;
2518                 *reqpage = 0;
2519                 return 1;
2520         }
2521
2522         /*
2523          * scan backward for the read-behind pages -- in memory
2524          *
2525          * Assume that if the page is not found an interrupt will not
2526          * create it.  Theoretically interrupts can only remove (busy)
2527          * pages, not create new associations.
2528          */
2529         if (pindex > 0) {
2530                 if (rbehind > pindex) {
2531                         rbehind = pindex;
2532                         startpindex = 0;
2533                 } else {
2534                         startpindex = pindex - rbehind;
2535                 }
2536
2537                 vm_object_hold(object);
2538                 for (tpindex = pindex; tpindex > startpindex; --tpindex) {
2539                         if (vm_page_lookup(object, tpindex - 1))
2540                                 break;
2541                 }
2542
2543                 i = 0;
2544                 while (tpindex < pindex) {
2545                         rtm = vm_page_alloc(object, tpindex, VM_ALLOC_SYSTEM |
2546                                                              VM_ALLOC_NULL_OK);
2547                         if (rtm == NULL) {
2548                                 for (j = 0; j < i; j++) {
2549                                         vm_page_free(marray[j]);
2550                                 }
2551                                 vm_object_drop(object);
2552                                 marray[0] = m;
2553                                 *reqpage = 0;
2554                                 return 1;
2555                         }
2556                         marray[i] = rtm;
2557                         ++i;
2558                         ++tpindex;
2559                 }
2560                 vm_object_drop(object);
2561         } else {
2562                 i = 0;
2563         }
2564
2565         /*
2566          * Assign requested page
2567          */
2568         marray[i] = m;
2569         *reqpage = i;
2570         ++i;
2571
2572         /*
2573          * Scan forwards for read-ahead pages
2574          */
2575         tpindex = pindex + 1;
2576         endpindex = tpindex + rahead;
2577         if (endpindex > object->size)
2578                 endpindex = object->size;
2579
2580         vm_object_hold(object);
2581         while (tpindex < endpindex) {
2582                 if (vm_page_lookup(object, tpindex))
2583                         break;
2584                 rtm = vm_page_alloc(object, tpindex, VM_ALLOC_SYSTEM |
2585                                                      VM_ALLOC_NULL_OK);
2586                 if (rtm == NULL)
2587                         break;
2588                 marray[i] = rtm;
2589                 ++i;
2590                 ++tpindex;
2591         }
2592         vm_object_drop(object);
2593
2594         return (i);
2595 }
2596
2597 #endif
2598
2599 /*
2600  * vm_prefault() provides a quick way of clustering pagefaults into a
2601  * process's address space.  It is a "cousin" of pmap_object_init_pt,
2602  * except it runs at page fault time instead of mmap time.
2603  *
2604  * vm.fast_fault        Enables pre-faulting zero-fill pages
2605  *
2606  * vm.prefault_pages    Number of pages (1/2 negative, 1/2 positive) to
2607  *                      prefault.  Scan stops in either direction when
2608  *                      a page is found to already exist.
2609  *
2610  * This code used to be per-platform pmap_prefault().  It is now
2611  * machine-independent and enhanced to also pre-fault zero-fill pages
2612  * (see vm.fast_fault) as well as make them writable, which greatly
2613  * reduces the number of page faults programs incur.
2614  *
2615  * Application performance when pre-faulting zero-fill pages is heavily
2616  * dependent on the application.  Very tiny applications like /bin/echo
2617  * lose a little performance while applications of any appreciable size
2618  * gain performance.  Prefaulting multiple pages also reduces SMP
2619  * congestion and can improve SMP performance significantly.
2620  *
2621  * NOTE!  prot may allow writing but this only applies to the top level
2622  *        object.  If we wind up mapping a page extracted from a backing
2623  *        object we have to make sure it is read-only.
2624  *
2625  * NOTE!  The caller has already handled any COW operations on the
2626  *        vm_map_entry via the normal fault code.  Do NOT call this
2627  *        shortcut unless the normal fault code has run on this entry.
2628  *
2629  * The related map must be locked.
2630  * No other requirements.
2631  */
2632 static int vm_prefault_pages = 8;
2633 SYSCTL_INT(_vm, OID_AUTO, prefault_pages, CTLFLAG_RW, &vm_prefault_pages, 0,
2634            "Maximum number of pages to pre-fault");
2635 static int vm_fast_fault = 1;
2636 SYSCTL_INT(_vm, OID_AUTO, fast_fault, CTLFLAG_RW, &vm_fast_fault, 0,
2637            "Burst fault zero-fill regions");
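/*
 * Illustration only (not compiled): the prefault scan alternates around
 * the faulting address, stepping one page further out on each iteration,
 * so with the default vm.prefault_pages of 8 the candidate order is
 * +1, -1, +2, -2, +3, -3, +4, -4 pages relative to addra.  The helper
 * below is hypothetical; it simply mirrors the address calculation used
 * by vm_prefault() and vm_prefault_quick().
 */
#if 0
static __inline vm_offset_t
vm_prefault_candidate(vm_offset_t addra, int i)
{
	if (i & 1)
		return (addra - ((i + 1) >> 1) * PAGE_SIZE);	/* negative side */
	return (addra + ((i + 2) >> 1) * PAGE_SIZE);		/* positive side */
}
#endif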
2638
2639 /*
2640  * Set PG_NOSYNC if the map entry indicates so, but only if the page
2641  * is not already dirty by other means.  This will prevent passive
2642  * filesystem syncing as well as 'sync' from writing out the page.
2643  */
2644 static void
2645 vm_set_nosync(vm_page_t m, vm_map_entry_t entry)
2646 {
2647         if (entry->eflags & MAP_ENTRY_NOSYNC) {
2648                 if (m->dirty == 0)
2649                         vm_page_flag_set(m, PG_NOSYNC);
2650         } else {
2651                 vm_page_flag_clear(m, PG_NOSYNC);
2652         }
2653 }
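/*
 * Illustration only (userland view, an assumption for context): a mapping
 * created with MAP_NOSYNC is the usual source of the MAP_ENTRY_NOSYNC
 * flag tested above, e.g.
 *
 *	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		 MAP_SHARED | MAP_NOSYNC, fd, 0);
 *
 * Pages dirtied through such a mapping are left to the pageout code and
 * explicit msync() calls rather than the periodic filesystem syncer.
 */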
2654
2655 static void
2656 vm_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry, int prot,
2657             int fault_flags)
2658 {
2659         struct lwp *lp;
2660         vm_page_t m;
2661         vm_offset_t addr;
2662         vm_pindex_t index;
2663         vm_pindex_t pindex;
2664         vm_object_t object;
2665         int pprot;
2666         int i;
2667         int noneg;
2668         int nopos;
2669         int maxpages;
2670
2671         /*
2672          * Get stable max count value, disabled if set to 0
2673          */
2674         maxpages = vm_prefault_pages;
2675         cpu_ccfence();
2676         if (maxpages <= 0)
2677                 return;
2678
2679         /*
2680          * We do not currently prefault mappings that use virtual page
2681          * tables.  We do not prefault foreign pmaps.
2682          */
2683         if (entry->maptype != VM_MAPTYPE_NORMAL)
2684                 return;
2685         lp = curthread->td_lwp;
2686         if (lp == NULL || (pmap != vmspace_pmap(lp->lwp_vmspace)))
2687                 return;
2688
2689         /*
2690          * Limit pre-fault count to 1024 pages.
2691          */
2692         if (maxpages > 1024)
2693                 maxpages = 1024;
2694
2695         object = entry->object.vm_object;
2696         KKASSERT(object != NULL);
2697         KKASSERT(object == entry->object.vm_object);
2698
2699         /*
2700          * NOTE: VM_FAULT_DIRTY is allowed later, so the object must be
2701          *       held exclusively now (or do something more complex XXX).
2702          */
2703         vm_object_hold(object);
2704         vm_object_chain_acquire(object, 0);
2705
2706         noneg = 0;
2707         nopos = 0;
2708         for (i = 0; i < maxpages; ++i) {
2709                 vm_object_t lobject;
2710                 vm_object_t nobject;
2711                 int allocated = 0;
2712                 int error;
2713
2714                 /*
2715                  * This can eat a lot of time on a heavily contended
2716                  * machine so yield on the tick if needed.
2717                  */
2718                 if ((i & 7) == 7)
2719                         lwkt_yield();
2720
2721                 /*
2722                  * Calculate the page to pre-fault, stopping the scan in
2723                  * each direction separately if the limit is reached.
2724                  */
2725                 if (i & 1) {
2726                         if (noneg)
2727                                 continue;
2728                         addr = addra - ((i + 1) >> 1) * PAGE_SIZE;
2729                 } else {
2730                         if (nopos)
2731                                 continue;
2732                         addr = addra + ((i + 2) >> 1) * PAGE_SIZE;
2733                 }
2734                 if (addr < entry->start) {
2735                         noneg = 1;
2736                         if (noneg && nopos)
2737                                 break;
2738                         continue;
2739                 }
2740                 if (addr >= entry->end) {
2741                         nopos = 1;
2742                         if (noneg && nopos)
2743                                 break;
2744                         continue;
2745                 }
2746
2747                 /*
2748                  * Skip pages already mapped, and stop scanning in that
2749                  * direction.  When the scan terminates in both directions
2750                  * we are done.
2751                  */
2752                 if (pmap_prefault_ok(pmap, addr) == 0) {
2753                         if (i & 1)
2754                                 noneg = 1;
2755                         else
2756                                 nopos = 1;
2757                         if (noneg && nopos)
2758                                 break;
2759                         continue;
2760                 }
2761
2762                 /*
2763                  * Follow the VM object chain to obtain the page to be mapped
2764                  * into the pmap.
2765                  *
2766                  * If we reach the terminal object without finding a page
2767                  * and we determine it would be advantageous, then allocate
2768                  * a zero-fill page for the base object.  The base object
2769                  * is guaranteed to be OBJT_DEFAULT for this case.
2770                  *
2771                  * To avoid having to check the pager via *haspage*() we stop
2772                  * if any non-default object is encountered, e.g. a vnode or
2773                  * swap object stops the loop.  (Worked example below.)
2774                  */
2775                 index = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
2776                 lobject = object;
2777                 pindex = index;
2778                 pprot = prot;
2779
2780                 KKASSERT(lobject == entry->object.vm_object);
2781                 /*vm_object_hold(lobject); implied */
2782
2783                 while ((m = vm_page_lookup_busy_try(lobject, pindex,
2784                                                     TRUE, &error)) == NULL) {
2785                         if (lobject->type != OBJT_DEFAULT)
2786                                 break;
2787                         if (lobject->backing_object == NULL) {
2788                                 if (vm_fast_fault == 0)
2789                                         break;
2790                                 if ((prot & VM_PROT_WRITE) == 0 ||
2791                                     vm_page_count_min(0)) {
2792                                         break;
2793                                 }
2794
2795                                 /*
2796                                  * NOTE: Allocated from base object
2797                                  */
2798                                 m = vm_page_alloc(object, index,
2799                                                   VM_ALLOC_NORMAL |
2800                                                   VM_ALLOC_ZERO |
2801                                                   VM_ALLOC_USE_GD |
2802                                                   VM_ALLOC_NULL_OK);
2803                                 if (m == NULL)
2804                                         break;
2805                                 allocated = 1;
2806                                 pprot = prot;
2807                                 /* lobject = object .. not needed */
2808                                 break;
2809                         }
2810                         if (lobject->backing_object_offset & PAGE_MASK)
2811                                 break;
2812                         nobject = lobject->backing_object;
2813                         vm_object_hold(nobject);
2814                         KKASSERT(nobject == lobject->backing_object);
2815                         pindex += lobject->backing_object_offset >> PAGE_SHIFT;
2816                         if (lobject != object) {
2817                                 vm_object_lock_swap();
2818                                 vm_object_drop(lobject);
2819                         }
2820                         lobject = nobject;
2821                         pprot &= ~VM_PROT_WRITE;
2822                         vm_object_chain_acquire(lobject, 0);
2823                 }
2824
2825                 /*
2826                  * NOTE: A non-NULL (m) will be associated with lobject if
2827                  *       it was found there, otherwise it is probably a
2828                  *       zero-fill page associated with the base object.
2829                  *
2830                  * Give up if no page is available.
2831                  */
2832                 if (m == NULL) {
2833                         if (lobject != object) {
2834 #if 0
2835                                 if (object->backing_object != lobject)
2836                                         vm_object_hold(object->backing_object);
2837 #endif
2838                                 vm_object_chain_release_all(
2839                                         object->backing_object, lobject);
2840 #if 0
2841                                 if (object->backing_object != lobject)
2842                                         vm_object_drop(object->backing_object);
2843 #endif
2844                                 vm_object_drop(lobject);
2845                         }
2846                         break;
2847                 }
2848
2849                 /*
2850                  * The object must be marked dirty if we are mapping a
2851                  * writable page.  m->object is either lobject or object,
2852                  * both of which are still held.  Do this before we
2853                  * potentially drop the object.
2854                  */
2855                 if (pprot & VM_PROT_WRITE)
2856                         vm_object_set_writeable_dirty(m->object);
2857
2858                 /*
2859                  * Do not conditionalize on PG_RAM.  If pages are present in
2860                  * the VM system we assume optimal caching.  If caching is
2861                  * not optimal the I/O gravy train will be restarted when we
2862                  * hit an unavailable page.  We do not want to try to restart
2863                  * the gravy train now because we really don't know how much
2864                  * of the object has been cached.  The cost for restarting
2865                  * the gravy train should be low (since accesses will likely
2866                  * be I/O bound anyway).
2867                  */
2868                 if (lobject != object) {
2869 #if 0
2870                         if (object->backing_object != lobject)
2871                                 vm_object_hold(object->backing_object);
2872 #endif
2873                         vm_object_chain_release_all(object->backing_object,
2874                                                     lobject);
2875 #if 0
2876                         if (object->backing_object != lobject)
2877                                 vm_object_drop(object->backing_object);
2878 #endif
2879                         vm_object_drop(lobject);
2880                 }
2881
2882                 /*
2883                  * Enter the page into the pmap if appropriate.  If we had
2884                  * allocated the page we have to place it on a queue.  If not
2885                  * we just have to make sure it isn't on the cache queue
2886                  * (pages on the cache queue are not allowed to be mapped).
2887                  */
2888                 if (allocated) {
2889                         /*
2890                          * Page must be zeroed.
2891                          */
2892                         vm_page_zero_fill(m);
2893                         mycpu->gd_cnt.v_zfod++;
2894                         m->valid = VM_PAGE_BITS_ALL;
2895
2896                         /*
2897                          * Handle dirty page case
2898                          */
2899                         if (pprot & VM_PROT_WRITE)
2900                                 vm_set_nosync(m, entry);
2901                         pmap_enter(pmap, addr, m, pprot, 0, entry);
2902                         mycpu->gd_cnt.v_vm_faults++;
2903                         if (curthread->td_lwp)
2904                                 ++curthread->td_lwp->lwp_ru.ru_minflt;
2905                         vm_page_deactivate(m);
2906                         if (pprot & VM_PROT_WRITE) {
2907                                 /*vm_object_set_writeable_dirty(m->object);*/
2908                                 vm_set_nosync(m, entry);
2909                                 if (fault_flags & VM_FAULT_DIRTY) {
2910                                         vm_page_dirty(m);
2911                                         /*XXX*/
2912                                         swap_pager_unswapped(m);
2913                                 }
2914                         }
2915                         vm_page_wakeup(m);
2916                 } else if (error) {
2917                         /* couldn't busy page, no wakeup */
2918                 } else if (
2919                     ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
2920                     (m->flags & PG_FICTITIOUS) == 0) {
2921                         /*
2922                          * A fully valid page not undergoing soft I/O can
2923                          * be immediately entered into the pmap.
2924                          */
2925                         if ((m->queue - m->pc) == PQ_CACHE)
2926                                 vm_page_deactivate(m);
2927                         if (pprot & VM_PROT_WRITE) {
2928                                 /*vm_object_set_writeable_dirty(m->object);*/
2929                                 vm_set_nosync(m, entry);
2930                                 if (fault_flags & VM_FAULT_DIRTY) {
2931                                         vm_page_dirty(m);
2932                                         /*XXX*/
2933                                         swap_pager_unswapped(m);
2934                                 }
2935                         }
2936                         if (pprot & VM_PROT_WRITE)
2937                                 vm_set_nosync(m, entry);
2938                         pmap_enter(pmap, addr, m, pprot, 0, entry);
2939                         mycpu->gd_cnt.v_vm_faults++;
2940                         if (curthread->td_lwp)
2941                                 ++curthread->td_lwp->lwp_ru.ru_minflt;
2942                         vm_page_wakeup(m);
2943                 } else {
2944                         vm_page_wakeup(m);
2945                 }
2946         }
2947         vm_object_chain_release(object);
2948         vm_object_drop(object);
2949 }
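#if 0
/*
 * Worked index example (illustration only, hypothetical values, assuming
 * PAGE_SHIFT == 12): the chain walk in vm_prefault() above converts the
 * prefault address into a base-object index and then shifts that index
 * by each backing object's page-aligned offset.
 */
static void
vm_prefault_index_example(void)
{
	vm_pindex_t pindex;

	/* addr - entry->start == 0x5000, entry->offset == 0x3000 */
	pindex = (0x5000 + 0x3000) >> PAGE_SHIFT;	/* base index 8 */

	/* one backing object with backing_object_offset == 0x2000 */
	pindex += 0x2000 >> PAGE_SHIFT;			/* lookup index 10 */

	KKASSERT(pindex == 10);
}
#endif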
2950
2951 /*
2952  * Object can be held shared
2953  */
2954 static void
2955 vm_prefault_quick(pmap_t pmap, vm_offset_t addra,
2956                   vm_map_entry_t entry, int prot, int fault_flags)
2957 {
2958         struct lwp *lp;
2959         vm_page_t m;
2960         vm_offset_t addr;
2961         vm_pindex_t pindex;
2962         vm_object_t object;
2963         int i;
2964         int noneg;
2965         int nopos;
2966         int maxpages;
2967
2968         /*
2969          * Get stable max count value, disabled if set to 0
2970          */
2971         maxpages = vm_prefault_pages;
2972         cpu_ccfence();
2973         if (maxpages <= 0)
2974                 return;
2975
2976         /*
2977          * We do not currently prefault mappings that use virtual page
2978          * tables.  We do not prefault foreign pmaps.
2979          */
2980         if (entry->maptype != VM_MAPTYPE_NORMAL)
2981                 return;
2982         lp = curthread->td_lwp;
2983         if (lp == NULL || (pmap != vmspace_pmap(lp->lwp_vmspace)))
2984                 return;
2985         object = entry->object.vm_object;
2986         if (object->backing_object != NULL)
2987                 return;
2988         ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
2989
2990         /*
2991          * Limit pre-fault count to 1024 pages.
2992          */
2993         if (maxpages > 1024)
2994                 maxpages = 1024;
2995
2996         noneg = 0;
2997         nopos = 0;
2998         for (i = 0; i < maxpages; ++i) {
2999                 int error;
3000
3001                 /*
3002                  * Calculate the page to pre-fault, stopping the scan in
3003                  * each direction separately if the limit is reached.
3004                  */
3005                 if (i & 1) {
3006                         if (noneg)
3007                                 continue;
3008                         addr = addra - ((i + 1) >> 1) * PAGE_SIZE;
3009                 } else {
3010                         if (nopos)
3011                                 continue;
3012                         addr = addra + ((i + 2) >> 1) * PAGE_SIZE;
3013                 }
3014                 if (addr < entry->start) {
3015                         noneg = 1;
3016                         if (noneg && nopos)
3017                                 break;
3018                         continue;
3019                 }
3020                 if (addr >= entry->end) {
3021                         nopos = 1;
3022                         if (noneg && nopos)
3023                                 break;
3024                         continue;
3025                 }
3026
3027                 /*
3028                  * Follow the VM object chain to obtain the page to be mapped
3029                  * into the pmap.  This version of the prefault code only
3030                  * works with terminal objects.
3031                  *
3032                  * The page must already exist.  If we encounter a problem
3033                  * we stop here.
3034                  *
3035                  * WARNING!  We cannot call swap_pager_unswapped() or insert
3036                  *           a new vm_page with a shared token.
3037                  */
3038                 pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
3039
3040                 /*
3041                  * Skip pages already mapped, and stop scanning in that
3042                  * direction.  When the scan terminates in both directions
3043                  * we are done.
3044                  */
3045                 if (pmap_prefault_ok(pmap, addr) == 0) {
3046                         if (i & 1)
3047                                 noneg = 1;
3048                         else
3049                                 nopos = 1;
3050                         if (noneg && nopos)
3051                                 break;
3052                         continue;
3053                 }
3054
3055                 /*
3056                  * Shortcut the read-only mapping case using the far more
3057                  * efficient vm_page_lookup_sbusy_try() function.  This
3058                  * allows us to acquire the page soft-busied only, which
3059                  * is especially nice for concurrent execs of the same
3060                  * program.  (Sketched after this function.)
3061                  *
3062                  * The lookup function also validates page suitability
3063                  * (all valid bits set, and not fictitious).
3064                  */
3065                 if ((prot & (VM_PROT_WRITE|VM_PROT_OVERRIDE_WRITE)) == 0) {
3066                         m = vm_page_lookup_sbusy_try(object, pindex);
3067                         if (m == NULL)
3068                                 break;
3069                         pmap_enter(pmap, addr, m, prot, 0, entry);
3070                         mycpu->gd_cnt.v_vm_faults++;
3071                         if (curthread->td_lwp)
3072                                 ++curthread->td_lwp->lwp_ru.ru_minflt;
3073                         vm_page_sbusy_drop(m);
3074                         continue;
3075                 }
3076
3077                 /*
3078                  * Fall back to the normal vm_page lookup code.  This code
3079                  * hard-busies the page.  Not only that, but the page
3080                  * can remain in that state for a significant period of
3081                  * time due to pmap_enter()'s overhead.
3082                  */
3083                 m = vm_page_lookup_busy_try(object, pindex, TRUE, &error);
3084                 if (m == NULL || error)
3085                         break;
3086
3087                 /*
3088                  * Stop if the page cannot be trivially entered into the
3089                  * pmap.
3090                  */
3091                 if (((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) ||
3092                     (m->flags & PG_FICTITIOUS) ||
3093                     ((m->flags & PG_SWAPPED) &&
3094                      (prot & VM_PROT_WRITE) &&
3095                      (fault_flags & VM_FAULT_DIRTY))) {
3096                         vm_page_wakeup(m);
3097                         break;
3098                 }
3099
3100                 /*
3101                  * Enter the page into the pmap.  The object might be held
3102                  * shared so we can't do any (serious) modifying operation
3103                  * on it.
3104                  */
3105                 if ((m->queue - m->pc) == PQ_CACHE)
3106                         vm_page_deactivate(m);
3107                 if (prot & VM_PROT_WRITE) {
3108                         vm_object_set_writeable_dirty(m->object);
3109                         vm_set_nosync(m, entry);
3110                         if (fault_flags & VM_FAULT_DIRTY) {
3111                                 vm_page_dirty(m);
3112                                 /* can't happen due to conditional above */
3113                                 /* swap_pager_unswapped(m); */
3114                         }
3115                 }
3116                 pmap_enter(pmap, addr, m, prot, 0, entry);
3117                 mycpu->gd_cnt.v_vm_faults++;
3118                 if (curthread->td_lwp)
3119                         ++curthread->td_lwp->lwp_ru.ru_minflt;
3120                 vm_page_wakeup(m);
3121         }
3122 }
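#if 0
/*
 * Minimal sketch (not compiled, helper name hypothetical) of the
 * read-only fast path used in vm_prefault_quick() above: soft-busy the
 * page, enter it into the pmap, then drop the soft-busy.  Per the lookup
 * function's contract noted above, only fully valid, non-fictitious
 * pages are returned, so no further validity checks are required.
 */
static int
vm_prefault_ro_enter(pmap_t pmap, vm_offset_t addr, vm_object_t object,
		     vm_pindex_t pindex, int prot, vm_map_entry_t entry)
{
	vm_page_t m;

	m = vm_page_lookup_sbusy_try(object, pindex);
	if (m == NULL)
		return (0);
	pmap_enter(pmap, addr, m, prot, 0, entry);
	vm_page_sbusy_drop(m);
	return (1);
}
#endif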