kernel - Fix MADV_NOSYNC and MAP_NOSYNC, improve vkernel performance
sys/vm/vm_fault.c
1 /*
2  * (MPSAFE)
3  *
4  * Copyright (c) 1991, 1993
5  *      The Regents of the University of California.  All rights reserved.
6  * Copyright (c) 1994 John S. Dyson
7  * All rights reserved.
8  * Copyright (c) 1994 David Greenman
9  * All rights reserved.
10  *
11  *
12  * This code is derived from software contributed to Berkeley by
13  * The Mach Operating System project at Carnegie-Mellon University.
14  *
15  * Redistribution and use in source and binary forms, with or without
16  * modification, are permitted provided that the following conditions
17  * are met:
18  * 1. Redistributions of source code must retain the above copyright
19  *    notice, this list of conditions and the following disclaimer.
20  * 2. Redistributions in binary form must reproduce the above copyright
21  *    notice, this list of conditions and the following disclaimer in the
22  *    documentation and/or other materials provided with the distribution.
23  * 3. All advertising materials mentioning features or use of this software
24  *    must display the following acknowledgement:
25  *      This product includes software developed by the University of
26  *      California, Berkeley and its contributors.
27  * 4. Neither the name of the University nor the names of its contributors
28  *    may be used to endorse or promote products derived from this software
29  *    without specific prior written permission.
30  *
31  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
32  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
33  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
34  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
35  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
39  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
40  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
41  * SUCH DAMAGE.
42  *
43  *      from: @(#)vm_fault.c    8.4 (Berkeley) 1/12/94
44  *
45  *
46  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
47  * All rights reserved.
48  *
49  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
50  *
51  * Permission to use, copy, modify and distribute this software and
52  * its documentation is hereby granted, provided that both the copyright
53  * notice and this permission notice appear in all copies of the
54  * software, derivative works or modified versions, and any portions
55  * thereof, and that both notices appear in supporting documentation.
56  *
57  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
58  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
59  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
60  *
61  * Carnegie Mellon requests users of this software to return to
62  *
63  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
64  *  School of Computer Science
65  *  Carnegie Mellon University
66  *  Pittsburgh PA 15213-3890
67  *
68  * any improvements or extensions that they make and grant Carnegie the
69  * rights to redistribute these changes.
70  *
71  * $FreeBSD: src/sys/vm/vm_fault.c,v 1.108.2.8 2002/02/26 05:49:27 silby Exp $
72  * $DragonFly: src/sys/vm/vm_fault.c,v 1.47 2008/07/01 02:02:56 dillon Exp $
73  */
74
75 /*
76  *      Page fault handling module.
77  */
78
79 #include <sys/param.h>
80 #include <sys/systm.h>
81 #include <sys/kernel.h>
82 #include <sys/proc.h>
83 #include <sys/vnode.h>
84 #include <sys/resourcevar.h>
85 #include <sys/vmmeter.h>
86 #include <sys/vkernel.h>
87 #include <sys/lock.h>
88 #include <sys/sysctl.h>
89
90 #include <cpu/lwbuf.h>
91
92 #include <vm/vm.h>
93 #include <vm/vm_param.h>
94 #include <vm/pmap.h>
95 #include <vm/vm_map.h>
96 #include <vm/vm_object.h>
97 #include <vm/vm_page.h>
98 #include <vm/vm_pageout.h>
99 #include <vm/vm_kern.h>
100 #include <vm/vm_pager.h>
101 #include <vm/vnode_pager.h>
102 #include <vm/vm_extern.h>
103
104 #include <sys/thread2.h>
105 #include <vm/vm_page2.h>
106
107 struct faultstate {
108         vm_page_t m;
109         vm_object_t object;
110         vm_pindex_t pindex;
111         vm_prot_t prot;
112         vm_page_t first_m;
113         vm_object_t first_object;
114         vm_prot_t first_prot;
115         vm_map_t map;
116         vm_map_entry_t entry;
117         int lookup_still_valid;
118         int didlimit;
119         int hardfault;
120         int fault_flags;
121         int map_generation;
122         boolean_t wired;
123         struct vnode *vp;
124 };
125
126 static int vm_fast_fault = 1;
127 SYSCTL_INT(_vm, OID_AUTO, fast_fault, CTLFLAG_RW, &vm_fast_fault, 0, 
128            "Burst fault zero-fill regions");
129 static int debug_cluster = 0;
130 SYSCTL_INT(_vm, OID_AUTO, debug_cluster, CTLFLAG_RW, &debug_cluster, 0, "");
131
132 static int vm_fault_object(struct faultstate *, vm_pindex_t, vm_prot_t);
133 static int vm_fault_vpagetable(struct faultstate *, vm_pindex_t *, vpte_t, int);
134 #if 0
135 static int vm_fault_additional_pages (vm_page_t, int, int, vm_page_t *, int *);
136 #endif
137 static int vm_fault_ratelimit(struct vmspace *);
138 static void vm_set_nosync(vm_page_t m, vm_map_entry_t entry);
139 static void vm_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry,
140                         int prot);
141
142 /*
143  * The caller must hold vm_token.
144  */
145 static __inline void
146 release_page(struct faultstate *fs)
147 {
148         vm_page_deactivate(fs->m);
149         vm_page_wakeup(fs->m);
150         fs->m = NULL;
151 }
152
153 /*
154  * The caller must hold vm_token.
155  */
156 static __inline void
157 unlock_map(struct faultstate *fs)
158 {
159         if (fs->lookup_still_valid && fs->map) {
160                 vm_map_lookup_done(fs->map, fs->entry, 0);
161                 fs->lookup_still_valid = FALSE;
162         }
163 }
164
165 /*
166  * Clean up after a successful call to vm_fault_object() so another call
167  * to vm_fault_object() can be made.
168  *
169  * The caller must hold vm_token.
170  */
171 static void
172 _cleanup_successful_fault(struct faultstate *fs, int relock)
173 {
174         if (fs->object != fs->first_object) {
175                 vm_page_free(fs->first_m);
176                 vm_object_pip_wakeup(fs->object);
177                 fs->first_m = NULL;
178         }
179         fs->object = fs->first_object;
180         if (relock && fs->lookup_still_valid == FALSE) {
181                 if (fs->map)
182                         vm_map_lock_read(fs->map);
183                 fs->lookup_still_valid = TRUE;
184         }
185 }
186
187 /*
188  * The caller must hold vm_token.
189  */
190 static void
191 _unlock_things(struct faultstate *fs, int dealloc)
192 {
193         vm_object_pip_wakeup(fs->first_object);
194         _cleanup_successful_fault(fs, 0);
195         if (dealloc) {
196                 vm_object_deallocate(fs->first_object);
197                 fs->first_object = NULL;
198         }
199         unlock_map(fs); 
200         if (fs->vp != NULL) { 
201                 vput(fs->vp);
202                 fs->vp = NULL;
203         }
204 }
205
206 #define unlock_things(fs) _unlock_things(fs, 0)
207 #define unlock_and_deallocate(fs) _unlock_things(fs, 1)
208 #define cleanup_successful_fault(fs) _cleanup_successful_fault(fs, 1)
209
210 /*
211  * TRYPAGER 
212  *
213  * Determine if the pager for the current object *might* contain the page.
214  *
215  * We only need to try the pager if this is not a default object (default
216  * objects are zero-fill and have no real pager), and if we are not taking
217  * a wiring fault or if the FS entry is wired.
218  */
219 #define TRYPAGER(fs)    \
220                 (fs->object->type != OBJT_DEFAULT && \
221                 (((fs->fault_flags & VM_FAULT_WIRE_MASK) == 0) || fs->wired))
222
223 /*
224  * vm_fault:
225  *
226  * Handle a page fault occurring at the given address, requiring the given
227  * permissions, in the map specified.  If successful, the page is inserted
228  * into the associated physical map.
229  *
230  * NOTE: The given address should be truncated to the proper page address.
231  *
232  * KERN_SUCCESS is returned if the page fault is handled; otherwise,
233  * a standard error specifying why the fault is fatal is returned.
234  *
235  * The map in question must be referenced, and remains so.
236  * The caller may hold no locks.
237  * No other requirements.
238  */
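/*
 * [Editor's note] Illustrative sketch, not part of the original source:
 * a machine-dependent trap handler typically resolves a user page fault
 * roughly along these lines.  'fault_addr' and 'is_write' are
 * hypothetical values taken from the trap frame.
 *
 *	vm_offset_t va = trunc_page(fault_addr);
 *	vm_prot_t ftype = is_write ? VM_PROT_WRITE : VM_PROT_READ;
 *	int rv = vm_fault(&lp->lwp_vmspace->vm_map, va, ftype,
 *			  VM_FAULT_NORMAL);
 *	if (rv != KERN_SUCCESS)
 *		(deliver SIGSEGV/SIGBUS to the faulting lwp)
 */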
239 int
240 vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type, int fault_flags)
241 {
242         int result;
243         vm_pindex_t first_pindex;
244         struct faultstate fs;
245         int growstack;
246
247         mycpu->gd_cnt.v_vm_faults++;
248
249         fs.didlimit = 0;
250         fs.hardfault = 0;
251         fs.fault_flags = fault_flags;
252         growstack = 1;
253
254 RetryFault:
255         /*
256          * Find the vm_map_entry representing the backing store and resolve
257          * the top level object and page index.  This may have the side
258          * effect of executing a copy-on-write on the map entry and/or
259          * creating a shadow object, but will not COW any actual VM pages.
260          *
261          * On success fs.map is left read-locked and various other fields 
262          * are initialized but not otherwise referenced or locked.
263          *
264          * NOTE!  vm_map_lookup will try to upgrade the fault_type to
265          * VM_FAULT_WRITE if the map entry is a virtual page table and also
266  * writable, so we can set the 'A'ccessed bit in the virtual page
267          * table entry.
268          */
269         fs.map = map;
270         result = vm_map_lookup(&fs.map, vaddr, fault_type,
271                                &fs.entry, &fs.first_object,
272                                &first_pindex, &fs.first_prot, &fs.wired);
273
274         /*
275          * If the lookup failed or the map protections are incompatible,
276          * the fault generally fails.  However, if the caller is trying
277          * to do a user wiring we have more work to do.
278          */
279         if (result != KERN_SUCCESS) {
280                 if (result != KERN_PROTECTION_FAILURE ||
281                     (fs.fault_flags & VM_FAULT_WIRE_MASK) != VM_FAULT_USER_WIRE)
282                 {
283                         if (result == KERN_INVALID_ADDRESS && growstack &&
284                             map != &kernel_map && curproc != NULL) {
285                                 result = vm_map_growstack(curproc, vaddr);
286                                 if (result != KERN_SUCCESS)
287                                         return (KERN_FAILURE);
288                                 growstack = 0;
289                                 goto RetryFault;
290                         }
291                         return (result);
292                 }
293
294                 /*
295                  * If we are user-wiring a r/w segment, and it is COW, then
296                  * we need to do the COW operation.  Note that we don't
297  *    currently COW RO sections, because it is NOT desirable
298                  * to COW .text.  We simply keep .text from ever being COW'ed
299                  * and take the heat that one cannot debug wired .text sections.
300                  */
301                 result = vm_map_lookup(&fs.map, vaddr,
302                                        VM_PROT_READ|VM_PROT_WRITE|
303                                         VM_PROT_OVERRIDE_WRITE,
304                                        &fs.entry, &fs.first_object,
305                                        &first_pindex, &fs.first_prot,
306                                        &fs.wired);
307                 if (result != KERN_SUCCESS)
308                         return result;
309
310                 /*
311                  * If we don't COW now, on a user wire, the user will never
312                  * be able to write to the mapping.  If we don't make this
313                  * restriction, the bookkeeping would be nearly impossible.
314                  */
315                 if ((fs.entry->protection & VM_PROT_WRITE) == 0)
316                         fs.entry->max_protection &= ~VM_PROT_WRITE;
317         }
318
319         /*
320          * fs.map is read-locked
321          *
322          * Misc checks.  Save the map generation number to detect races.
323          */
324         fs.map_generation = fs.map->timestamp;
325
326         if (fs.entry->eflags & MAP_ENTRY_NOFAULT) {
327                 panic("vm_fault: fault on nofault entry, addr: %lx",
328                     (u_long)vaddr);
329         }
330
331         /*
332          * A system map entry may return a NULL object.  No object means
333          * no pager means an unrecoverable kernel fault.
334          */
335         if (fs.first_object == NULL) {
336                 panic("vm_fault: unrecoverable fault at %p in entry %p",
337                         (void *)vaddr, fs.entry);
338         }
339
340         /*
341          * Make a reference to this object to prevent its disposal while we
342          * are messing with it.  Once we have the reference, the map is free
343          * to be diddled.  Since objects reference their shadows (and copies),
344          * they will stay around as well.
345          *
346          * Bump the paging-in-progress count to prevent size changes (e.g.
347          * truncation operations) during I/O.  This must be done after
348          * obtaining the vnode lock in order to avoid possible deadlocks.
349          *
350          * The vm_token is needed to manipulate the vm_object
351          */
352         lwkt_gettoken(&vm_token);
353         vm_object_reference(fs.first_object);
354         fs.vp = vnode_pager_lock(fs.first_object);
355         vm_object_pip_add(fs.first_object, 1);
356         lwkt_reltoken(&vm_token);
357
358         fs.lookup_still_valid = TRUE;
359         fs.first_m = NULL;
360         fs.object = fs.first_object;    /* so unlock_and_deallocate works */
361
362         /*
363          * If the entry is wired we cannot change the page protection.
364          */
365         if (fs.wired)
366                 fault_type = fs.first_prot;
367
368         /*
369          * The page we want is at (first_object, first_pindex), but if the
370          * vm_map_entry is VM_MAPTYPE_VPAGETABLE we have to traverse the
371          * page table to figure out the actual pindex.
372          *
373          * NOTE!  DEVELOPMENT IN PROGRESS, THIS IS AN INITIAL IMPLEMENTATION
374          * ONLY
375          */
376         if (fs.entry->maptype == VM_MAPTYPE_VPAGETABLE) {
377                 result = vm_fault_vpagetable(&fs, &first_pindex,
378                                              fs.entry->aux.master_pde,
379                                              fault_type);
380                 if (result == KERN_TRY_AGAIN)
381                         goto RetryFault;
382                 if (result != KERN_SUCCESS)
383                         return (result);
384         }
385
386         /*
387          * Now we have the actual (object, pindex), fault in the page.  If
388          * vm_fault_object() fails it will unlock and deallocate the FS
389          * data.   If it succeeds everything remains locked and fs->object
390          * will have an additional PIP count if it is not equal to
391          * fs->first_object
392          *
393          * vm_fault_object will set fs->prot for the pmap operation.  It is
394  * allowed to set VM_PROT_WRITE even if fault_type == VM_PROT_READ,
395  * provided the page can be safely written.  However, it will force a read-only
396          * mapping for a read fault if the memory is managed by a virtual
397          * page table.
398          */
399         result = vm_fault_object(&fs, first_pindex, fault_type);
400
401         if (result == KERN_TRY_AGAIN)
402                 goto RetryFault;
403         if (result != KERN_SUCCESS)
404                 return (result);
405
406         /*
407          * On success vm_fault_object() does not unlock or deallocate, and fs.m
408          * will contain a busied page.
409          *
410          * Enter the page into the pmap and do pmap-related adjustments.
411          */
412         pmap_enter(fs.map->pmap, vaddr, fs.m, fs.prot, fs.wired);
413
414         /*
415          * Burst in a few more pages if possible.  The fs.map should still
416          * be locked.
417          */
418         if (fault_flags & VM_FAULT_BURST) {
419                 if ((fs.fault_flags & VM_FAULT_WIRE_MASK) == 0 &&
420                     fs.wired == 0) {
421                         vm_prefault(fs.map->pmap, vaddr, fs.entry, fs.prot);
422                 }
423         }
424         unlock_things(&fs);
425
426         vm_page_flag_clear(fs.m, PG_ZERO);
427         vm_page_flag_set(fs.m, PG_REFERENCED);
428
429         /*
430          * If the page is not wired down, then put it where the pageout daemon
431          * can find it.
432          *
433          * We do not really need to get vm_token here but since all the
434  * vm_*() calls have to, doing it here improves efficiency.
435          */
436         lwkt_gettoken(&vm_token);
437         if (fs.fault_flags & VM_FAULT_WIRE_MASK) {
438                 if (fs.wired)
439                         vm_page_wire(fs.m);
440                 else
441                         vm_page_unwire(fs.m, 1);
442         } else {
443                 vm_page_activate(fs.m);
444         }
445
446         if (curthread->td_lwp) {
447                 if (fs.hardfault) {
448                         curthread->td_lwp->lwp_ru.ru_majflt++;
449                 } else {
450                         curthread->td_lwp->lwp_ru.ru_minflt++;
451                 }
452         }
453
454         /*
455          * Unlock everything, and return
456          */
457         vm_page_wakeup(fs.m);
458         vm_object_deallocate(fs.first_object);
459         lwkt_reltoken(&vm_token);
460
461         return (KERN_SUCCESS);
462 }
463
464 /*
465  * Fault in the specified virtual address in the current process map, 
466  * returning a held VM page or NULL.  See vm_fault_page() for more 
467  * information.
468  *
469  * No requirements.
470  */
471 vm_page_t
472 vm_fault_page_quick(vm_offset_t va, vm_prot_t fault_type, int *errorp)
473 {
474         struct lwp *lp = curthread->td_lwp;
475         vm_page_t m;
476
477         m = vm_fault_page(&lp->lwp_vmspace->vm_map, va, 
478                           fault_type, VM_FAULT_NORMAL, errorp);
479         return(m);
480 }
481
482 /*
483  * Fault in the specified virtual address in the specified map, doing all
484  * necessary manipulation of the object store and all necessary I/O.  Return
485  * a held VM page or NULL, and set *errorp.  The related pmap is not
486  * updated.
487  *
488  * The returned page will be properly dirtied if VM_PROT_WRITE was specified,
489  * and marked PG_REFERENCED as well.
490  *
491  * If the page cannot be faulted writable and VM_PROT_WRITE was specified, an
492  * error will be returned.
493  *
494  * No requirements.
495  */
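/*
 * [Editor's note] Illustrative sketch, not part of the original source:
 * a caller needing temporary access to a user page might use
 * vm_fault_page() along these lines.  'map' and 'uva' are placeholders;
 * the held page is released with vm_page_unhold() when done.
 *
 *	int error;
 *	vm_page_t m;
 *
 *	m = vm_fault_page(map, trunc_page(uva), VM_PROT_READ,
 *			  VM_FAULT_NORMAL, &error);
 *	if (m == NULL)
 *		return (EFAULT);
 *	(access the page via a temporary kernel mapping, e.g. an lwbuf)
 *	vm_page_unhold(m);
 */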
496 vm_page_t
497 vm_fault_page(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
498               int fault_flags, int *errorp)
499 {
500         vm_pindex_t first_pindex;
501         struct faultstate fs;
502         int result;
503         vm_prot_t orig_fault_type = fault_type;
504
505         mycpu->gd_cnt.v_vm_faults++;
506
507         fs.didlimit = 0;
508         fs.hardfault = 0;
509         fs.fault_flags = fault_flags;
510         KKASSERT((fault_flags & VM_FAULT_WIRE_MASK) == 0);
511
512 RetryFault:
513         /*
514          * Find the vm_map_entry representing the backing store and resolve
515          * the top level object and page index.  This may have the side
516          * effect of executing a copy-on-write on the map entry and/or
517          * creating a shadow object, but will not COW any actual VM pages.
518          *
519          * On success fs.map is left read-locked and various other fields 
520          * are initialized but not otherwise referenced or locked.
521          *
522          * NOTE!  vm_map_lookup will upgrade the fault_type to VM_FAULT_WRITE
523          * if the map entry is a virtual page table and also writable,
524  * so we can set the 'A'ccessed bit in the virtual page table entry.
525          */
526         fs.map = map;
527         result = vm_map_lookup(&fs.map, vaddr, fault_type,
528                                &fs.entry, &fs.first_object,
529                                &first_pindex, &fs.first_prot, &fs.wired);
530
531         if (result != KERN_SUCCESS) {
532                 *errorp = result;
533                 return (NULL);
534         }
535
536         /*
537          * fs.map is read-locked
538          *
539          * Misc checks.  Save the map generation number to detect races.
540          */
541         fs.map_generation = fs.map->timestamp;
542
543         if (fs.entry->eflags & MAP_ENTRY_NOFAULT) {
544                 panic("vm_fault: fault on nofault entry, addr: %lx",
545                     (u_long)vaddr);
546         }
547
548         /*
549          * A system map entry may return a NULL object.  No object means
550          * no pager means an unrecoverable kernel fault.
551          */
552         if (fs.first_object == NULL) {
553                 panic("vm_fault: unrecoverable fault at %p in entry %p",
554                         (void *)vaddr, fs.entry);
555         }
556
557         /*
558          * Make a reference to this object to prevent its disposal while we
559          * are messing with it.  Once we have the reference, the map is free
560          * to be diddled.  Since objects reference their shadows (and copies),
561          * they will stay around as well.
562          *
563          * Bump the paging-in-progress count to prevent size changes (e.g.
564          * truncation operations) during I/O.  This must be done after
565          * obtaining the vnode lock in order to avoid possible deadlocks.
566          *
567          * The vm_token is needed to manipulate the vm_object
568          */
569         lwkt_gettoken(&vm_token);
570         vm_object_reference(fs.first_object);
571         fs.vp = vnode_pager_lock(fs.first_object);
572         vm_object_pip_add(fs.first_object, 1);
573         lwkt_reltoken(&vm_token);
574
575         fs.lookup_still_valid = TRUE;
576         fs.first_m = NULL;
577         fs.object = fs.first_object;    /* so unlock_and_deallocate works */
578
579         /*
580          * If the entry is wired we cannot change the page protection.
581          */
582         if (fs.wired)
583                 fault_type = fs.first_prot;
584
585         /*
586          * The page we want is at (first_object, first_pindex), but if the
587          * vm_map_entry is VM_MAPTYPE_VPAGETABLE we have to traverse the
588          * page table to figure out the actual pindex.
589          *
590          * NOTE!  DEVELOPMENT IN PROGRESS, THIS IS AN INITIAL IMPLEMENTATION
591          * ONLY
592          */
593         if (fs.entry->maptype == VM_MAPTYPE_VPAGETABLE) {
594                 result = vm_fault_vpagetable(&fs, &first_pindex,
595                                              fs.entry->aux.master_pde,
596                                              fault_type);
597                 if (result == KERN_TRY_AGAIN)
598                         goto RetryFault;
599                 if (result != KERN_SUCCESS) {
600                         *errorp = result;
601                         return (NULL);
602                 }
603         }
604
605         /*
606          * Now we have the actual (object, pindex), fault in the page.  If
607          * vm_fault_object() fails it will unlock and deallocate the FS
608          * data.   If it succeeds everything remains locked and fs->object
609  * will have an additional PIP count if it is not equal to
610          * fs->first_object
611          */
612         result = vm_fault_object(&fs, first_pindex, fault_type);
613
614         if (result == KERN_TRY_AGAIN)
615                 goto RetryFault;
616         if (result != KERN_SUCCESS) {
617                 *errorp = result;
618                 return(NULL);
619         }
620
621         if ((orig_fault_type & VM_PROT_WRITE) &&
622             (fs.prot & VM_PROT_WRITE) == 0) {
623                 *errorp = KERN_PROTECTION_FAILURE;
624                 unlock_and_deallocate(&fs);
625                 return(NULL);
626         }
627
628         /*
629          * On success vm_fault_object() does not unlock or deallocate, and fs.m
630          * will contain a busied page.
631          */
632         unlock_things(&fs);
633
634         /*
635          * Return a held page.  We are not doing any pmap manipulation so do
636          * not set PG_MAPPED.  However, adjust the page flags according to
637          * the fault type because the caller may not use a managed pmapping
638          * (so we don't want to lose the fact that the page will be dirtied
639          * if a write fault was specified).
640          */
641         lwkt_gettoken(&vm_token);
642         vm_page_hold(fs.m);
643         vm_page_flag_clear(fs.m, PG_ZERO);
644         if (fault_type & VM_PROT_WRITE)
645                 vm_page_dirty(fs.m);
646
647         /*
648          * Update the pmap.  We really only have to do this if a COW
649  * occurred to replace the read-only page with the new page.  For
650          * now just do it unconditionally. XXX
651          */
652         pmap_enter(fs.map->pmap, vaddr, fs.m, fs.prot, fs.wired);
653         vm_page_flag_set(fs.m, PG_REFERENCED);
654
655         /*
656          * Unbusy the page by activating it.  It remains held and will not
657          * be reclaimed.
658          */
659         vm_page_activate(fs.m);
660
661         if (curthread->td_lwp) {
662                 if (fs.hardfault) {
663                         curthread->td_lwp->lwp_ru.ru_majflt++;
664                 } else {
665                         curthread->td_lwp->lwp_ru.ru_minflt++;
666                 }
667         }
668
669         /*
670          * Unlock everything, and return the held page.
671          */
672         vm_page_wakeup(fs.m);
673         vm_object_deallocate(fs.first_object);
674         lwkt_reltoken(&vm_token);
675
676         *errorp = 0;
677         return(fs.m);
678 }
679
680 /*
681  * Fault in the specified (object,offset), dirty the returned page as
682  * needed.  If the requested fault_type cannot be satisfied, NULL is
683  * returned and *errorp is set.
684  *
685  * A held (but not busied) page is returned.
686  *
687  * No requirements.
688  */
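/*
 * [Editor's note] Illustrative sketch, not part of the original source:
 * faulting a page directly against a VM object, bypassing any vm_map,
 * might look like this.  'obj' and 'off' are placeholders; the held
 * page is released with vm_page_unhold().
 *
 *	int error;
 *	vm_page_t m;
 *
 *	m = vm_fault_object_page(obj, off, VM_PROT_READ | VM_PROT_WRITE,
 *				 VM_FAULT_DIRTY, &error);
 *	if (m == NULL)
 *		return (EFAULT);
 *	(use the page, e.g. via its physical address)
 *	vm_page_unhold(m);
 */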
689 vm_page_t
690 vm_fault_object_page(vm_object_t object, vm_ooffset_t offset,
691                      vm_prot_t fault_type, int fault_flags, int *errorp)
692 {
693         int result;
694         vm_pindex_t first_pindex;
695         struct faultstate fs;
696         struct vm_map_entry entry;
697
698         bzero(&entry, sizeof(entry));
699         entry.object.vm_object = object;
700         entry.maptype = VM_MAPTYPE_NORMAL;
701         entry.protection = entry.max_protection = fault_type;
702
703         fs.didlimit = 0;
704         fs.hardfault = 0;
705         fs.fault_flags = fault_flags;
706         fs.map = NULL;
707         KKASSERT((fault_flags & VM_FAULT_WIRE_MASK) == 0);
708
709 RetryFault:
710         
711         fs.first_object = object;
712         first_pindex = OFF_TO_IDX(offset);
713         fs.entry = &entry;
714         fs.first_prot = fault_type;
715         fs.wired = 0;
716         /*fs.map_generation = 0; unused */
717
718         /*
719          * Make a reference to this object to prevent its disposal while we
720          * are messing with it.  Once we have the reference, the map is free
721          * to be diddled.  Since objects reference their shadows (and copies),
722          * they will stay around as well.
723          *
724          * Bump the paging-in-progress count to prevent size changes (e.g.
725          * truncation operations) during I/O.  This must be done after
726          * obtaining the vnode lock in order to avoid possible deadlocks.
727          */
728         lwkt_gettoken(&vm_token);
729         vm_object_reference(fs.first_object);
730         fs.vp = vnode_pager_lock(fs.first_object);
731         vm_object_pip_add(fs.first_object, 1);
732         lwkt_reltoken(&vm_token);
733
734         fs.lookup_still_valid = TRUE;
735         fs.first_m = NULL;
736         fs.object = fs.first_object;    /* so unlock_and_deallocate works */
737
738 #if 0
739         /* XXX future - ability to operate on VM object using vpagetable */
740         if (fs.entry->maptype == VM_MAPTYPE_VPAGETABLE) {
741                 result = vm_fault_vpagetable(&fs, &first_pindex,
742                                              fs.entry->aux.master_pde,
743                                              fault_type);
744                 if (result == KERN_TRY_AGAIN)
745                         goto RetryFault;
746                 if (result != KERN_SUCCESS) {
747                         *errorp = result;
748                         return (NULL);
749                 }
750         }
751 #endif
752
753         /*
754          * Now we have the actual (object, pindex), fault in the page.  If
755          * vm_fault_object() fails it will unlock and deallocate the FS
756          * data.   If it succeeds everything remains locked and fs->object
757  * will have an additional PIP count if it is not equal to
758          * fs->first_object
759          */
760         result = vm_fault_object(&fs, first_pindex, fault_type);
761
762         if (result == KERN_TRY_AGAIN)
763                 goto RetryFault;
764         if (result != KERN_SUCCESS) {
765                 *errorp = result;
766                 return(NULL);
767         }
768
769         if ((fault_type & VM_PROT_WRITE) && (fs.prot & VM_PROT_WRITE) == 0) {
770                 *errorp = KERN_PROTECTION_FAILURE;
771                 unlock_and_deallocate(&fs);
772                 return(NULL);
773         }
774
775         /*
776          * On success vm_fault_object() does not unlock or deallocate, and fs.m
777          * will contain a busied page.
778          */
779         unlock_things(&fs);
780
781         /*
782          * Return a held page.  We are not doing any pmap manipulation so do
783          * not set PG_MAPPED.  However, adjust the page flags according to
784          * the fault type because the caller may not use a managed pmapping
785          * (so we don't want to lose the fact that the page will be dirtied
786          * if a write fault was specified).
787          */
788         lwkt_gettoken(&vm_token);
789         vm_page_hold(fs.m);
790         vm_page_flag_clear(fs.m, PG_ZERO);
791         if (fault_type & VM_PROT_WRITE)
792                 vm_page_dirty(fs.m);
793
794         if (fault_flags & VM_FAULT_DIRTY)
795                 vm_page_dirty(fs.m);
796         if (fault_flags & VM_FAULT_UNSWAP)
797                 swap_pager_unswapped(fs.m);
798
799         /*
800          * Indicate that the page was accessed.
801          */
802         vm_page_flag_set(fs.m, PG_REFERENCED);
803
804         /*
805          * Unbusy the page by activating it.  It remains held and will not
806          * be reclaimed.
807          */
808         vm_page_activate(fs.m);
809
810         if (curthread->td_lwp) {
811                 if (fs.hardfault) {
812                         mycpu->gd_cnt.v_vm_faults++;
813                         curthread->td_lwp->lwp_ru.ru_majflt++;
814                 } else {
815                         curthread->td_lwp->lwp_ru.ru_minflt++;
816                 }
817         }
818
819         /*
820          * Unlock everything, and return the held page.
821          */
822         vm_page_wakeup(fs.m);
823         vm_object_deallocate(fs.first_object);
824         lwkt_reltoken(&vm_token);
825
826         *errorp = 0;
827         return(fs.m);
828 }
829
830 /*
831  * Translate the virtual page number (first_pindex) that is relative
832  * to the address space into a logical page number that is relative to the
833  * backing object.  Use the virtual page table pointed to by (vpte).
834  *
835  * This implements an N-level page table.  Any level can terminate the
836  * scan by setting VPTE_PS.   A linear mapping is accomplished by setting
837  * VPTE_PS in the master page directory entry set via mcontrol(MADV_SETMAP).
838  *
839  * No requirements (vm_token need not be held).
840  */
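/*
 * [Editor's note] Illustrative example, not part of the original source:
 * when a linear mapping was established via mcontrol(MADV_SETMAP) the
 * master pde has VPTE_PS set, so the loop below terminates immediately
 * and the translation reduces to
 *
 *	*pindex = ((vpte & VPTE_FRAME) >> PAGE_SHIFT) +
 *		  (*pindex & ((1L << vshift) - 1));
 *
 * i.e. the faulting pindex is simply offset by the page frame encoded
 * in the master pde.  Without VPTE_PS each iteration consumes
 * VPTE_PAGE_BITS worth of index bits and descends one page table level
 * until VPTE_PS or vshift == 0 terminates the walk.
 */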
841 static
842 int
843 vm_fault_vpagetable(struct faultstate *fs, vm_pindex_t *pindex,
844                     vpte_t vpte, int fault_type)
845 {
846         struct lwbuf *lwb;
847         int vshift = VPTE_FRAME_END - PAGE_SHIFT; /* index bits remaining */
848         int result = KERN_SUCCESS;
849         vpte_t *ptep;
850
851         for (;;) {
852                 /*
853                  * We cannot proceed if the vpte is not valid, not readable
854                  * for a read fault, or not writable for a write fault.
855                  */
856                 if ((vpte & VPTE_V) == 0) {
857                         unlock_and_deallocate(fs);
858                         return (KERN_FAILURE);
859                 }
860                 if ((fault_type & VM_PROT_READ) && (vpte & VPTE_R) == 0) {
861                         unlock_and_deallocate(fs);
862                         return (KERN_FAILURE);
863                 }
864                 if ((fault_type & VM_PROT_WRITE) && (vpte & VPTE_W) == 0) {
865                         unlock_and_deallocate(fs);
866                         return (KERN_FAILURE);
867                 }
868                 if ((vpte & VPTE_PS) || vshift == 0)
869                         break;
870                 KKASSERT(vshift >= VPTE_PAGE_BITS);
871
872                 /*
873                  * Get the page table page.  Nominally we only read the page
874                  * table, but since we are actively setting VPTE_M and VPTE_A,
875                  * tell vm_fault_object() that we are writing it. 
876                  *
877                  * There is currently no real need to optimize this.
878                  */
879                 result = vm_fault_object(fs, (vpte & VPTE_FRAME) >> PAGE_SHIFT,
880                                          VM_PROT_READ|VM_PROT_WRITE);
881                 if (result != KERN_SUCCESS)
882                         return (result);
883
884                 /*
885                  * Process the returned fs.m and look up the page table
886                  * entry in the page table page.
887                  */
888                 vshift -= VPTE_PAGE_BITS;
889                 lwb = lwbuf_alloc(fs->m);
890                 ptep = ((vpte_t *)lwbuf_kva(lwb) +
891                         ((*pindex >> vshift) & VPTE_PAGE_MASK));
892                 vpte = *ptep;
893
894                 /*
895                  * Page table write-back.  If the vpte is valid for the
896                  * requested operation, do a write-back to the page table.
897                  *
898                  * XXX VPTE_M is not set properly for page directory pages.
899                  * It doesn't get set in the page directory if the page table
900                  * is modified during a read access.
901                  */
902                 if ((fault_type & VM_PROT_WRITE) && (vpte & VPTE_V) &&
903                     (vpte & VPTE_W)) {
904                         if ((vpte & (VPTE_M|VPTE_A)) != (VPTE_M|VPTE_A)) {
905                                 atomic_set_long(ptep, VPTE_M | VPTE_A);
906                                 vm_page_dirty(fs->m);
907                         }
908                 }
909                 if ((fault_type & VM_PROT_READ) && (vpte & VPTE_V) &&
910                     (vpte & VPTE_R)) {
911                         if ((vpte & VPTE_A) == 0) {
912                                 atomic_set_long(ptep, VPTE_A);
913                                 vm_page_dirty(fs->m);
914                         }
915                 }
916                 lwbuf_free(lwb);
917                 vm_page_flag_set(fs->m, PG_REFERENCED);
918                 vm_page_activate(fs->m);
919                 vm_page_wakeup(fs->m);
920                 cleanup_successful_fault(fs);
921         }
922         /*
923          * Combine remaining address bits with the vpte.
924          */
925         /* JG how many bits from each? */
926         *pindex = ((vpte & VPTE_FRAME) >> PAGE_SHIFT) +
927                   (*pindex & ((1L << vshift) - 1));
928         return (KERN_SUCCESS);
929 }
930
931
932 /*
933  * This is the core of the vm_fault code.
934  *
935  * Do all operations required to fault-in (fs.first_object, pindex).  Run
936  * through the shadow chain as necessary and do required COW or virtual
937  * copy operations.  The caller has already fully resolved the vm_map_entry
938  * and, if appropriate, has created a copy-on-write layer.  All we need to
939  * do is iterate the object chain.
940  *
941  * On failure (fs) is unlocked and deallocated and the caller may return or
942  * retry depending on the failure code.  On success (fs) is NOT unlocked or
943  * deallocated, fs.m will contain a resolved, busied page, and fs.object
944  * will have an additional PIP count if it is not equal to fs.first_object.
945  *
946  * No requirements.
947  */
948 static
949 int
950 vm_fault_object(struct faultstate *fs,
951                 vm_pindex_t first_pindex, vm_prot_t fault_type)
952 {
953         vm_object_t next_object;
954         vm_pindex_t pindex;
955
956         fs->prot = fs->first_prot;
957         fs->object = fs->first_object;
958         pindex = first_pindex;
959
960         /* 
961          * If a read fault occurs we try to make the page writable if
962          * possible.  There are three cases where we cannot make the
963          * page mapping writable:
964          *
965          * (1) The mapping is read-only or the VM object is read-only,
966          *     fs->prot above will simply not have VM_PROT_WRITE set.
967          *
968          * (2) If the mapping is a virtual page table we need to be able
969          *     to detect writes so we can set VPTE_M in the virtual page
970          *     table.
971          *
972          * (3) If the VM page is read-only or copy-on-write, upgrading would
973          *     just result in an unnecessary COW fault.
974          *
975          * VM_PROT_VPAGED is set if faulting via a virtual page table and
976          * causes adjustments to the 'M'odify bit to also turn off write
977          * access to force a re-fault.
978          */
979         if (fs->entry->maptype == VM_MAPTYPE_VPAGETABLE) {
980                 if ((fault_type & VM_PROT_WRITE) == 0)
981                         fs->prot &= ~VM_PROT_WRITE;
982         }
983
984         lwkt_gettoken(&vm_token);
985
986         for (;;) {
987                 /*
988                  * If the object is dead, we stop here
989                  */
990                 if (fs->object->flags & OBJ_DEAD) {
991                         unlock_and_deallocate(fs);
992                         lwkt_reltoken(&vm_token);
993                         return (KERN_PROTECTION_FAILURE);
994                 }
995
996                 /*
997                  * See if page is resident.  spl protection is required
998                  * to avoid an interrupt unbusy/free race against our
999                  * lookup.  We must hold the protection through a page
1000                  * allocation or busy.
1001                  */
1002                 crit_enter();
1003                 fs->m = vm_page_lookup(fs->object, pindex);
1004                 if (fs->m != NULL) {
1005                         int queue;
1006                         /*
1007                          * Wait/Retry if the page is busy.  We have to do this
1008                          * if the page is busy via either PG_BUSY or 
1009                          * vm_page_t->busy because the vm_pager may be using
1010                          * vm_page_t->busy for pageouts ( and even pageins if
1011                          * it is the vnode pager ), and we could end up trying
1012                          * to pagein and pageout the same page simultaneously.
1013                          *
1014                          * We can theoretically allow the busy case on a read
1015                          * fault if the page is marked valid, but since such
1016                          * pages are typically already pmap'd, putting that
1017                          * special case in might be more effort than it is
1018                          * worth.  We cannot under any circumstances mess
1019                          * around with a vm_page_t->busy page except, perhaps,
1020                          * to pmap it.
1021                          */
1022                         if ((fs->m->flags & PG_BUSY) || fs->m->busy) {
1023                                 unlock_things(fs);
1024                                 vm_page_sleep_busy(fs->m, TRUE, "vmpfw");
1025                                 mycpu->gd_cnt.v_intrans++;
1026                                 vm_object_deallocate(fs->first_object);
1027                                 fs->first_object = NULL;
1028                                 lwkt_reltoken(&vm_token);
1029                                 crit_exit();
1030                                 return (KERN_TRY_AGAIN);
1031                         }
1032
1033                         /*
1034                          * If reactivating a page from PQ_CACHE we may have
1035                          * to rate-limit.
1036                          */
1037                         queue = fs->m->queue;
1038                         vm_page_unqueue_nowakeup(fs->m);
1039
1040                         if ((queue - fs->m->pc) == PQ_CACHE && 
1041                             vm_page_count_severe()) {
1042                                 vm_page_activate(fs->m);
1043                                 unlock_and_deallocate(fs);
1044                                 vm_waitpfault();
1045                                 lwkt_reltoken(&vm_token);
1046                                 crit_exit();
1047                                 return (KERN_TRY_AGAIN);
1048                         }
1049
1050                         /*
1051                          * Mark page busy for other processes, and the 
1052                          * pagedaemon.  If it still isn't completely valid
1053                          * (readable), or if a read-ahead-mark is set on
1054                          * the VM page, jump to readrest, else we found the
1055                          * page and can return.
1056                          *
1057                          * We can release the spl once we have marked the
1058                          * page busy.
1059                          */
1060                         vm_page_busy(fs->m);
1061                         crit_exit();
1062
1063                         if (fs->m->object != &kernel_object) {
1064                                 if ((fs->m->valid & VM_PAGE_BITS_ALL) !=
1065                                     VM_PAGE_BITS_ALL) {
1066                                         goto readrest;
1067                                 }
1068                                 if (fs->m->flags & PG_RAM) {
1069                                         if (debug_cluster)
1070                                                 kprintf("R");
1071                                         vm_page_flag_clear(fs->m, PG_RAM);
1072                                         goto readrest;
1073                                 }
1074                         }
1075                         break; /* break to PAGE HAS BEEN FOUND */
1076                 }
1077
1078                 /*
1079                  * Page is not resident.  If this is the search termination
1080                  * or the pager might contain the page, allocate a new page.
1081                  *
1082                  * NOTE: We are still in a critical section.
1083                  */
1084                 if (TRYPAGER(fs) || fs->object == fs->first_object) {
1085                         /*
1086                          * If the page is beyond the object size we fail
1087                          */
1088                         if (pindex >= fs->object->size) {
1089                                 lwkt_reltoken(&vm_token);
1090                                 crit_exit();
1091                                 unlock_and_deallocate(fs);
1092                                 return (KERN_PROTECTION_FAILURE);
1093                         }
1094
1095                         /*
1096                          * Ratelimit.
1097                          */
1098                         if (fs->didlimit == 0 && curproc != NULL) {
1099                                 int limticks;
1100
1101                                 limticks = vm_fault_ratelimit(curproc->p_vmspace);
1102                                 if (limticks) {
1103                                         lwkt_reltoken(&vm_token);
1104                                         crit_exit();
1105                                         unlock_and_deallocate(fs);
1106                                         tsleep(curproc, 0, "vmrate", limticks);
1107                                         fs->didlimit = 1;
1108                                         return (KERN_TRY_AGAIN);
1109                                 }
1110                         }
1111
1112                         /*
1113                          * Allocate a new page for this object/offset pair.
1114                          */
1115                         fs->m = NULL;
1116                         if (!vm_page_count_severe()) {
1117                                 fs->m = vm_page_alloc(fs->object, pindex,
1118                                     (fs->vp || fs->object->backing_object) ? VM_ALLOC_NORMAL : VM_ALLOC_NORMAL | VM_ALLOC_ZERO);
1119                         }
1120                         if (fs->m == NULL) {
1121                                 lwkt_reltoken(&vm_token);
1122                                 crit_exit();
1123                                 unlock_and_deallocate(fs);
1124                                 vm_waitpfault();
1125                                 return (KERN_TRY_AGAIN);
1126                         }
1127                 }
1128                 crit_exit();
1129
1130 readrest:
1131                 /*
1132                  * We have found an invalid or partially valid page, a
1133                  * page with a read-ahead mark which might be partially or
1134                  * fully valid (and maybe dirty too), or we have allocated
1135                  * a new page.
1136                  *
1137                  * Attempt to fault-in the page if there is a chance that the
1138                  * pager has it, and potentially fault in additional pages
1139                  * at the same time.
1140                  *
1141                  * We are NOT in splvm here and if TRYPAGER is true then
1142                  * fs.m will be non-NULL and will be PG_BUSY for us.
1143                  */
1144                 if (TRYPAGER(fs)) {
1145                         int rv;
1146                         int seqaccess;
1147                         u_char behavior = vm_map_entry_behavior(fs->entry);
1148
1149                         if (behavior == MAP_ENTRY_BEHAV_RANDOM)
1150                                 seqaccess = 0;
1151                         else
1152                                 seqaccess = -1;
1153
1154                         /*
1155                          * If sequential access is detected then attempt
1156                          * to deactivate/cache pages behind the scan to
1157                          * prevent resource hogging.
1158                          *
1159                          * Use of PG_RAM to detect sequential access
1160                          * also simulates multi-zone sequential access
1161                          * detection for free.
1162                          *
1163                          * NOTE: Partially valid dirty pages cannot be
1164                          *       deactivated without causing NFS piecemeal
1165                          *       writes to barf.
1166                          */
1167                         if ((fs->first_object->type != OBJT_DEVICE) &&
1168                             (behavior == MAP_ENTRY_BEHAV_SEQUENTIAL ||
1169                                 (behavior != MAP_ENTRY_BEHAV_RANDOM &&
1170                                  (fs->m->flags & PG_RAM)))
1171                         ) {
1172                                 vm_pindex_t scan_pindex;
1173                                 int scan_count = 16;
1174
1175                                 if (first_pindex < 16) {
1176                                         scan_pindex = 0;
1177                                         scan_count = 0;
1178                                 } else {
1179                                         scan_pindex = first_pindex - 16;
1180                                         if (scan_pindex < 16)
1181                                                 scan_count = scan_pindex;
1182                                         else
1183                                                 scan_count = 16;
1184                                 }
1185
1186                                 crit_enter();
1187                                 while (scan_count) {
1188                                         vm_page_t mt;
1189
1190                                         mt = vm_page_lookup(fs->first_object,
1191                                                             scan_pindex);
1192                                         if (mt == NULL ||
1193                                             (mt->valid != VM_PAGE_BITS_ALL)) {
1194                                                 break;
1195                                         }
1196                                         if (mt->busy ||
1197                                             (mt->flags & (PG_BUSY | PG_FICTITIOUS | PG_UNMANAGED)) ||
1198                                             mt->hold_count ||
1199                                             mt->wire_count)  {
1200                                                 goto skip;
1201                                         }
1202                                         if (mt->dirty == 0)
1203                                                 vm_page_test_dirty(mt);
1204                                         if (mt->dirty) {
1205                                                 vm_page_busy(mt);
1206                                                 vm_page_protect(mt,
1207                                                                 VM_PROT_NONE);
1208                                                 vm_page_deactivate(mt);
1209                                                 vm_page_wakeup(mt);
1210                                         } else {
1211                                                 vm_page_cache(mt);
1212                                         }
1213 skip:
1214                                         --scan_count;
1215                                         --scan_pindex;
1216                                 }
1217                                 crit_exit();
1218
1219                                 seqaccess = 1;
1220                         }
1221
1222                         /*
1223                          * Avoid deadlocking against the map when doing I/O.
1224                          * fs.object and the page is PG_BUSY'd.
1225                          */
1226                         unlock_map(fs);
1227
1228                         /*
1229                          * Acquire the page data.  We still hold a ref on
1230                          * fs.object and the page has been PG_BUSY'd.
1231                          *
1232                          * The pager may replace the page (for example, in
1233                          * order to enter a fictitious page into the
1234                          * object).  If it does so it is responsible for
1235                          * cleaning up the passed page and properly setting
1236                          * the new page PG_BUSY.
1237                          *
1238                          * If we got here through a PG_RAM read-ahead
1239                          * mark, the page may be partially dirty and thus
1240                          * not freeable.  Don't bother checking to see
1241                          * if the pager has the page because we can't free
1242                          * it anyway.  We have to depend on the get_page
1243                          * operation filling in any gaps whether there is
1244                          * backing store or not.
1245                          */
1246                         rv = vm_pager_get_page(fs->object, &fs->m, seqaccess);
1247
1248                         if (rv == VM_PAGER_OK) {
1249                                 /*
1250                                  * Relookup in case pager changed page. Pager
1251                                  * is responsible for disposition of old page
1252                                  * if moved.
1253                                  *
1254                                  * XXX other code segments do relookups too.
1255                                  * It's a bad abstraction that needs to be
1256                                  * fixed/removed.
1257                                  */
1258                                 fs->m = vm_page_lookup(fs->object, pindex);
1259                                 if (fs->m == NULL) {
1260                                         lwkt_reltoken(&vm_token);
1261                                         unlock_and_deallocate(fs);
1262                                         return (KERN_TRY_AGAIN);
1263                                 }
1264
1265                                 ++fs->hardfault;
1266                                 break; /* break to PAGE HAS BEEN FOUND */
1267                         }
1268
1269                         /*
1270                          * Remove the bogus page (which does not exist at this
1271                          * object/offset); before doing so, we must get back
1272                          * our object lock to preserve our invariant.
1273                          *
1274                          * Also wake up any other process that may want to bring
1275                          * in this page.
1276                          *
1277                          * If this is the top-level object, we must leave the
1278                          * busy page to prevent another process from rushing
1279                          * past us, and inserting the page in that object at
1280                          * the same time that we are.
1281                          */
1282                         if (rv == VM_PAGER_ERROR) {
1283                                 if (curproc)
1284                                         kprintf("vm_fault: pager read error, pid %d (%s)\n", curproc->p_pid, curproc->p_comm);
1285                                 else
1286                                        kprintf("vm_fault: pager read error, thread %p (%s)\n", curthread, curthread->td_comm);
1287                         }
1288
1289                         /*
1290                          * Data outside the range of the pager or an I/O error
1291                          *
1292                          * The page may have been wired during the pagein,
1293                          * e.g. by the buffer cache, and cannot simply be
1294                          * freed.  Call vnode_pager_freepage() to deal with it.
1295                          */
1296                         /*
1297                          * XXX - the check for kernel_map is a kludge to work
1298                          * around having the machine panic on a kernel space
1299                          * fault w/ I/O error.
1300                          */
1301                         if (((fs->map != &kernel_map) &&
1302                             (rv == VM_PAGER_ERROR)) || (rv == VM_PAGER_BAD)) {
1303                                 vnode_pager_freepage(fs->m);
1304                                 lwkt_reltoken(&vm_token);
1305                                 fs->m = NULL;
1306                                 unlock_and_deallocate(fs);
1307                                 if (rv == VM_PAGER_ERROR)
1308                                         return (KERN_FAILURE);
1309                                 else
1310                                         return (KERN_PROTECTION_FAILURE);
1311                                 /* NOT REACHED */
1312                         }
1313                         if (fs->object != fs->first_object) {
1314                                 vnode_pager_freepage(fs->m);
1315                                 fs->m = NULL;
1316                                 /*
1317                                  * XXX - we cannot just fall out at this
1318                                  * point, m has been freed and is invalid!
1319                                  */
1320                         }
1321                 }
1322
1323                 /*
1324                  * We get here if the object has a default pager (or we are
1325                  * unwiring) or the pager doesn't have the page.
1326                  */
1327                 if (fs->object == fs->first_object)
1328                         fs->first_m = fs->m;
1329
1330                 /*
1331                  * Move on to the next object.  Lock the next object before
1332                  * unlocking the current one.
1333                  */
1334                 pindex += OFF_TO_IDX(fs->object->backing_object_offset);
1335                 next_object = fs->object->backing_object;
1336                 if (next_object == NULL) {
1337                         /*
1338                          * If there's no object left, fill the page in the top
1339                          * object with zeros.
1340                          */
1341                         if (fs->object != fs->first_object) {
1342                                 vm_object_pip_wakeup(fs->object);
1343
1344                                 fs->object = fs->first_object;
1345                                 pindex = first_pindex;
1346                                 fs->m = fs->first_m;
1347                         }
1348                         fs->first_m = NULL;
1349
1350                         /*
1351                          * Zero the page if necessary and mark it valid.
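                              * A page that still has PG_ZERO set was pre-zeroed
                              * (e.g. by the idle-time page zeroing code) and does
                              * not need another bzero; such faults are counted as
                              * optimized zero-fills (v_ozfod).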
1352                          */
1353                         if ((fs->m->flags & PG_ZERO) == 0) {
1354                                 vm_page_zero_fill(fs->m);
1355                         } else {
1356                                 mycpu->gd_cnt.v_ozfod++;
1357                         }
1358                         mycpu->gd_cnt.v_zfod++;
1359                         fs->m->valid = VM_PAGE_BITS_ALL;
1360                         break;  /* break to PAGE HAS BEEN FOUND */
1361                 }
1362                 if (fs->object != fs->first_object) {
1363                         vm_object_pip_wakeup(fs->object);
1364                 }
1365                 KASSERT(fs->object != next_object,
1366                         ("object loop %p", next_object));
1367                 fs->object = next_object;
1368                 vm_object_pip_add(fs->object, 1);
1369         }
1370
1371         /*
1372          * PAGE HAS BEEN FOUND. [Loop invariant still holds -- the object lock
1373          * is held.]
1374          *
1375          * vm_token is still held
1376          *
1377          * If the page is being written, but isn't already owned by the
1378          * top-level object, we have to copy it into a new page owned by the
1379          * top-level object.
1380          */
1381         KASSERT((fs->m->flags & PG_BUSY) != 0,
1382                 ("vm_fault: not busy after main loop"));
1383
1384         if (fs->object != fs->first_object) {
1385                 /*
1386                  * We only really need to copy if we want to write it.
1387                  */
1388                 if (fault_type & VM_PROT_WRITE) {
1389                         /*
1390                          * This allows pages to be virtually copied from a 
1391                          * backing_object into the first_object, where the 
1392                          * backing object has no other refs to it, and cannot
1393                          * gain any more refs.  Instead of a bcopy, we just 
1394                          * move the page from the backing object to the 
1395                          * first object.  Note that we must mark the page 
1396                          * dirty in the first object so that it will go out 
1397                          * to swap when needed.
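                              *
                              * Every condition tested below must hold for this
                              * optimization to be safe; otherwise we fall back to
                              * a plain vm_page_copy().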
1398                          */
1399                         if (
1400                                 /*
1401                                  * Map, if present, has not changed
1402                                  */
1403                                 (fs->map == NULL ||
1404                                 fs->map_generation == fs->map->timestamp) &&
1405                                 /*
1406                                  * Only one shadow object
1407                                  */
1408                                 (fs->object->shadow_count == 1) &&
1409                                 /*
1410                                  * No COW refs, except us
1411                                  */
1412                                 (fs->object->ref_count == 1) &&
1413                                 /*
1414                                  * No one else can look this object up
1415                                  */
1416                                 (fs->object->handle == NULL) &&
1417                                 /*
1418                                  * No other ways to look the object up
1419                                  */
1420                                 ((fs->object->type == OBJT_DEFAULT) ||
1421                                  (fs->object->type == OBJT_SWAP)) &&
1422                                 /*
1423                                  * We don't chase down the shadow chain
1424                                  */
1425                                 (fs->object == fs->first_object->backing_object) &&
1426
1427                                 /*
1428                                  * grab the lock if we need to
1429                                  */
1430                                 (fs->lookup_still_valid ||
1431                                  fs->map == NULL ||
1432                                  lockmgr(&fs->map->lock, LK_EXCLUSIVE|LK_NOWAIT) == 0)
1433                             ) {
1434                                 
1435                                 fs->lookup_still_valid = 1;
1436                                 /*
1437                                  * get rid of the unnecessary page
1438                                  */
1439                                 vm_page_protect(fs->first_m, VM_PROT_NONE);
1440                                 vm_page_free(fs->first_m);
1441                                 fs->first_m = NULL;
1442
1443                                 /*
1444                                  * Grab the page and put it into the
1445                                  * process's object.  The page is
1446                                  * automatically made dirty.
1447                                  */
1448                                 vm_page_rename(fs->m, fs->first_object, first_pindex);
1449                                 fs->first_m = fs->m;
1450                                 vm_page_busy(fs->first_m);
1451                                 fs->m = NULL;
1452                                 mycpu->gd_cnt.v_cow_optim++;
1453                         } else {
1454                                 /*
1455                                  * Oh, well, let's copy it.
1456                                  */
1457                                 vm_page_copy(fs->m, fs->first_m);
1458                                 vm_page_event(fs->m, VMEVENT_COW);
1459                         }
1460
1461                         if (fs->m) {
1462                                 /*
1463                                  * We no longer need the old page or object.
1464                                  */
1465                                 release_page(fs);
1466                         }
1467
1468                         /*
1469                          * fs->object != fs->first_object due to above 
1470                          * conditional
1471                          */
1472                         vm_object_pip_wakeup(fs->object);
1473
1474                         /*
1475                          * Only use the new page below...
1476                          */
1477
1478                         mycpu->gd_cnt.v_cow_faults++;
1479                         fs->m = fs->first_m;
1480                         fs->object = fs->first_object;
1481                         pindex = first_pindex;
1482                 } else {
1483                         /*
1484                          * If it wasn't a write fault avoid having to copy
1485                          * the page by mapping it read-only.
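                              * A later write access will fault again and perform
                              * the copy at that point.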
1486                          */
1487                         fs->prot &= ~VM_PROT_WRITE;
1488                 }
1489         }
1490
1491         /*
1492          * We may have had to unlock a map to do I/O.  If we did then
1493          * lookup_still_valid will be FALSE.  If the map generation count
1494          * also changed then all sorts of things could have happened while
1495          * we were doing the I/O and we need to retry.
1496          */
1497
1498         if (!fs->lookup_still_valid &&
1499             fs->map != NULL &&
1500             (fs->map->timestamp != fs->map_generation)) {
1501                 release_page(fs);
1502                 lwkt_reltoken(&vm_token);
1503                 unlock_and_deallocate(fs);
1504                 return (KERN_TRY_AGAIN);
1505         }
1506
1507         /*
1508          * If the fault is a write, we know that this page is being
1509          * written NOW so dirty it explicitly to save on pmap_is_modified()
1510          * calls later.
1511          *
1512          * If this is a NOSYNC mmap we do not want to set PG_NOSYNC
1513          * if the page is already dirty; otherwise data written earlier
1514          * with the expectation of being synced would never be synced.
1515          * Likewise, if this entry does not request NOSYNC, make sure
1516          * the page isn't marked NOSYNC.  Applications sharing data
1517          * should use the same flags to avoid ping-ponging.
1518          *
1519          * Also tell the backing pager, if any, that it should remove
1520          * any swap backing since the page is now dirty.
1521          */
1522         if (fs->prot & VM_PROT_WRITE) {
1523                 vm_object_set_writeable_dirty(fs->m->object);
1524                 vm_set_nosync(fs->m, fs->entry);
1525                 if (fs->fault_flags & VM_FAULT_DIRTY) {
1526                         crit_enter();
1527                         vm_page_dirty(fs->m);
1528                         swap_pager_unswapped(fs->m);
1529                         crit_exit();
1530                 }
1531         }
1532
1533         lwkt_reltoken(&vm_token);
1534
1535         /*
1536          * Page had better still be busy.  We are still locked up and 
1537          * fs->object will have another PIP reference if it is not equal
1538          * to fs->first_object.
1539          */
1540         KASSERT(fs->m->flags & PG_BUSY,
1541                 ("vm_fault: page %p not busy!", fs->m));
1542
1543         /*
1544          * Sanity check: page must be completely valid or it is not fit to
1545          * map into user space.  vm_pager_get_page() ensures this.
1546          */
1547         if (fs->m->valid != VM_PAGE_BITS_ALL) {
1548                 vm_page_zero_invalid(fs->m, TRUE);
1549                 kprintf("Warning: page %p partially invalid on fault\n", fs->m);
1550         }
1551
1552         return (KERN_SUCCESS);
1553 }
1554
1555 /*
1556  * Wire down a range of virtual addresses in a map.  The entry in question
1557  * should be marked in-transition and the map must be locked.  We must
1558  * release the map temporarily while faulting-in the page to avoid a
1559  * deadlock.  Note that the entry may be clipped while we are blocked but
1560  * will never be freed.
1561  *
1562  * No requirements.
1563  */
1564 int
1565 vm_fault_wire(vm_map_t map, vm_map_entry_t entry, boolean_t user_wire)
1566 {
1567         boolean_t fictitious;
1568         vm_offset_t start;
1569         vm_offset_t end;
1570         vm_offset_t va;
1571         vm_paddr_t pa;
1572         pmap_t pmap;
1573         int rv;
1574
1575         pmap = vm_map_pmap(map);
1576         start = entry->start;
1577         end = entry->end;
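             /*
              * Device-backed (OBJT_DEVICE) pages are fictitious and are not
              * managed on the page queues, so the unwind code below must not
              * vm_page_unwire() them.
              */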
1578         fictitious = entry->object.vm_object &&
1579                         (entry->object.vm_object->type == OBJT_DEVICE);
1580
1581         lwkt_gettoken(&vm_token);
1582         vm_map_unlock(map);
1583         map->timestamp++;
1584
1585         /*
1586          * We simulate a fault to get the page and enter it in the physical
1587          * map.
1588          */
1589         for (va = start; va < end; va += PAGE_SIZE) {
1590                 if (user_wire) {
1591                         rv = vm_fault(map, va, VM_PROT_READ, 
1592                                         VM_FAULT_USER_WIRE);
1593                 } else {
1594                         rv = vm_fault(map, va, VM_PROT_READ|VM_PROT_WRITE,
1595                                         VM_FAULT_CHANGE_WIRING);
1596                 }
1597                 if (rv) {
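                             /*
                              * The wiring fault failed.  Unwind any wirings we
                              * already established before returning the error.
                              */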
1598                         while (va > start) {
1599                                 va -= PAGE_SIZE;
1600                                 if ((pa = pmap_extract(pmap, va)) == 0)
1601                                         continue;
1602                                 pmap_change_wiring(pmap, va, FALSE);
1603                                 if (!fictitious)
1604                                         vm_page_unwire(PHYS_TO_VM_PAGE(pa), 1);
1605                         }
1606                         vm_map_lock(map);
1607                         lwkt_reltoken(&vm_token);
1608                         return (rv);
1609                 }
1610         }
1611         vm_map_lock(map);
1612         lwkt_reltoken(&vm_token);
1613         return (KERN_SUCCESS);
1614 }
1615
1616 /*
1617  * Unwire a range of virtual addresses in a map.  The map should be
1618  * locked.
1619  */
1620 void
1621 vm_fault_unwire(vm_map_t map, vm_map_entry_t entry)
1622 {
1623         boolean_t fictitious;
1624         vm_offset_t start;
1625         vm_offset_t end;
1626         vm_offset_t va;
1627         vm_paddr_t pa;
1628         pmap_t pmap;
1629
1630         pmap = vm_map_pmap(map);
1631         start = entry->start;
1632         end = entry->end;
1633         fictitious = entry->object.vm_object &&
1634                         (entry->object.vm_object->type == OBJT_DEVICE);
1635
1636         /*
1637          * Since the pages are wired down, we must be able to get their
1638          * mappings from the physical map system.
1639          */
1640         lwkt_gettoken(&vm_token);
1641         for (va = start; va < end; va += PAGE_SIZE) {
1642                 pa = pmap_extract(pmap, va);
1643                 if (pa != 0) {
1644                         pmap_change_wiring(pmap, va, FALSE);
1645                         if (!fictitious)
1646                                 vm_page_unwire(PHYS_TO_VM_PAGE(pa), 1);
1647                 }
1648         }
1649         lwkt_reltoken(&vm_token);
1650 }
1651
1652 /*
1653  * Reduce the rate at which memory is allocated to a process based
1654  * on the perceived load on the VM system. As the load increases
1655  * the allocation burst rate goes down and the delay increases. 
1656  *
1657  * Rate limiting does not apply when faulting active or inactive
1658  * pages.  When faulting 'cache' pages, rate limiting only applies
1659  * if the system currently has a severe page deficit.
1660  *
1661  * XXX vm_pagesupply should be increased when a page is freed.
1662  *
1663  * We sleep up to 1/10 of a second.
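      *
      * For example, at vm_load 500 (assuming the 0-1000 scale implied by
      * the formulas below) a process gets a burst of (1000 - 500) / 10 = 50
      * unthrottled faults, then sleeps 500 * hz / 10000 = hz/20 ticks
      * (1/20 of a second) before the next burst.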
1664  */
1665 static int
1666 vm_fault_ratelimit(struct vmspace *vmspace)
1667 {
1668         if (vm_load_enable == 0)
1669                 return(0);
1670         if (vmspace->vm_pagesupply > 0) {
1671                 --vmspace->vm_pagesupply;       /* SMP race ok */
1672                 return(0);
1673         }
1674 #ifdef INVARIANTS
1675         if (vm_load_debug) {
1676                 kprintf("load %-4d give %d pgs, wait %d, pid %-5d (%s)\n",
1677                         vm_load, 
1678                         (1000 - vm_load) / 10, vm_load * hz / 10000,
1679                         curproc->p_pid, curproc->p_comm);
1680         }
1681 #endif
1682         vmspace->vm_pagesupply = (1000 - vm_load) / 10;
1683         return(vm_load * hz / 10000);
1684 }
1685
1686 /*
1687  * Copy all of the pages from a wired-down map entry to another.
1688  *
1689  * The source and destination maps must be locked for write.
1690  * The source map entry must be wired down (or be a sharing map
1691  * entry corresponding to a main map entry that is wired down).
1692  *
1693  * No other requirements.
1694  */
1695 void
1696 vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
1697                     vm_map_entry_t dst_entry, vm_map_entry_t src_entry)
1698 {
1699         vm_object_t dst_object;
1700         vm_object_t src_object;
1701         vm_ooffset_t dst_offset;
1702         vm_ooffset_t src_offset;
1703         vm_prot_t prot;
1704         vm_offset_t vaddr;
1705         vm_page_t dst_m;
1706         vm_page_t src_m;
1707
1708 #ifdef  lint
1709         src_map++;
1710 #endif  /* lint */
1711
1712         src_object = src_entry->object.vm_object;
1713         src_offset = src_entry->offset;
1714
1715         /*
1716          * Create the top-level object for the destination entry. (Doesn't
1717          * actually shadow anything - we copy the pages directly.)
1718          */
1719         vm_map_entry_allocate_object(dst_entry);
1720         dst_object = dst_entry->object.vm_object;
1721
1722         prot = dst_entry->max_protection;
1723
1724         /*
1725          * Loop through all of the pages in the entry's range, copying each
1726          * one from the source object (it should be there) to the destination
1727          * object.
1728          */
1729         for (vaddr = dst_entry->start, dst_offset = 0;
1730             vaddr < dst_entry->end;
1731             vaddr += PAGE_SIZE, dst_offset += PAGE_SIZE) {
1732
1733                 /*
1734                  * Allocate a page in the destination object
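                      * (sleep in vm_wait() and retry if no page is currently
                      * available).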
1735                  */
1736                 do {
1737                         dst_m = vm_page_alloc(dst_object,
1738                                 OFF_TO_IDX(dst_offset), VM_ALLOC_NORMAL);
1739                         if (dst_m == NULL) {
1740                                 vm_wait(0);
1741                         }
1742                 } while (dst_m == NULL);
1743
1744                 /*
1745                  * Find the page in the source object, and copy it in.
1746                  * (Because the source is wired down, the page will be in
1747                  * memory.)
1748                  */
1749                 src_m = vm_page_lookup(src_object,
1750                         OFF_TO_IDX(dst_offset + src_offset));
1751                 if (src_m == NULL)
1752                         panic("vm_fault_copy_entry: page missing");
1753
1754                 vm_page_copy(src_m, dst_m);
1755                 vm_page_event(src_m, VMEVENT_COW);
1756
1757                 /*
1758                  * Enter it in the pmap...
1759                  */
1760
1761                 vm_page_flag_clear(dst_m, PG_ZERO);
1762                 pmap_enter(dst_map->pmap, vaddr, dst_m, prot, FALSE);
1763
1764                 /*
1765                  * Mark it no longer busy, and put it on the active list.
1766                  */
1767                 vm_page_activate(dst_m);
1768                 vm_page_wakeup(dst_m);
1769         }
1770 }
1771
1772 #if 0
1773
1774 /*
1775  * This routine checks around the requested page for other pages that
1776  * might be able to be faulted in.  This routine brackets the viable
1777  * pages for the pages to be paged in.
1778  *
1779  * Inputs:
1780  *      m, rbehind, rahead
1781  *
1782  * Outputs:
1783  *  marray (array of vm_page_t), reqpage (index of requested page)
1784  *
1785  * Return value:
1786  *  number of pages in marray
1787  */
1788 static int
1789 vm_fault_additional_pages(vm_page_t m, int rbehind, int rahead,
1790                           vm_page_t *marray, int *reqpage)
1791 {
1792         int i,j;
1793         vm_object_t object;
1794         vm_pindex_t pindex, startpindex, endpindex, tpindex;
1795         vm_page_t rtm;
1796         int cbehind, cahead;
1797
1798         object = m->object;
1799         pindex = m->pindex;
1800
1801         /*
1802          * we don't fault-ahead for device pager
1803          */
1804         if (object->type == OBJT_DEVICE) {
1805                 *reqpage = 0;
1806                 marray[0] = m;
1807                 return 1;
1808         }
1809
1810         /*
1811          * if the requested page is not available, then give up now
1812          */
1813         if (!vm_pager_has_page(object, pindex, &cbehind, &cahead)) {
1814                 *reqpage = 0;   /* not used by caller, fix compiler warn */
1815                 return 0;
1816         }
1817
1818         if ((cbehind == 0) && (cahead == 0)) {
1819                 *reqpage = 0;
1820                 marray[0] = m;
1821                 return 1;
1822         }
1823
1824         if (rahead > cahead) {
1825                 rahead = cahead;
1826         }
1827
1828         if (rbehind > cbehind) {
1829                 rbehind = cbehind;
1830         }
1831
1832         /*
1833          * Do not do any readahead if we have insufficient free memory.
1834          *
1835                  * XXX this code was disabled because it was broken, and it is
1836                  * still unstable with the conditional fixed, so shortcut for now.
1837          */
1838         if (burst_fault == 0 || vm_page_count_severe()) {
1839                 marray[0] = m;
1840                 *reqpage = 0;
1841                 return 1;
1842         }
1843
1844         /*
1845          * scan backward for the read behind pages -- in memory 
1846          *
1847          * Assume that if the page is not found an interrupt will not
1848          * create it.  Theoretically interrupts can only remove (busy)
1849          * pages, not create new associations.
1850          */
1851         if (pindex > 0) {
1852                 if (rbehind > pindex) {
1853                         rbehind = pindex;
1854                         startpindex = 0;
1855                 } else {
1856                         startpindex = pindex - rbehind;
1857                 }
1858
1859                 crit_enter();
1860                 lwkt_gettoken(&vm_token);
1861                 for (tpindex = pindex; tpindex > startpindex; --tpindex) {
1862                         if (vm_page_lookup(object, tpindex - 1))
1863                                 break;
1864                 }
1865
1866                 i = 0;
1867                 while (tpindex < pindex) {
1868                         rtm = vm_page_alloc(object, tpindex, VM_ALLOC_SYSTEM);
1869                         if (rtm == NULL) {
1870                                 lwkt_reltoken(&vm_token);
1871                                 crit_exit();
1872                                 for (j = 0; j < i; j++) {
1873                                         vm_page_free(marray[j]);
1874                                 }
1875                                 marray[0] = m;
1876                                 *reqpage = 0;
1877                                 return 1;
1878                         }
1879                         marray[i] = rtm;
1880                         ++i;
1881                         ++tpindex;
1882                 }
1883                 lwkt_reltoken(&vm_token);
1884                 crit_exit();
1885         } else {
1886                 i = 0;
1887         }
1888
1889         /*
1890          * Assign requested page
1891          */
1892         marray[i] = m;
1893         *reqpage = i;
1894         ++i;
1895
1896         /*
1897          * Scan forwards for read-ahead pages
1898          */
1899         tpindex = pindex + 1;
1900         endpindex = tpindex + rahead;
1901         if (endpindex > object->size)
1902                 endpindex = object->size;
1903
1904         crit_enter();
1905         lwkt_gettoken(&vm_token);
1906         while (tpindex < endpindex) {
1907                 if (vm_page_lookup(object, tpindex))
1908                         break;
1909                 rtm = vm_page_alloc(object, tpindex, VM_ALLOC_SYSTEM);
1910                 if (rtm == NULL)
1911                         break;
1912                 marray[i] = rtm;
1913                 ++i;
1914                 ++tpindex;
1915         }
1916         lwkt_reltoken(&vm_token);
1917         crit_exit();
1918
1919         return (i);
1920 }
1921
1922 #endif
1923
1924 /*
1925  * vm_prefault() provides a quick way of clustering pagefaults into a
1926  * process's address space.  It is a "cousin" of pmap_object_init_pt,
1927  * except it runs at page fault time instead of mmap time.
1928  *
1929  * This code used to be per-platform pmap_prefault().  It is now
1930  * machine-independent and enhanced to also pre-fault zero-fill pages
1931  * (see vm.fast_fault) as well as make them writable, which greatly
1932  * reduces the number of page faults programs incur.
1933  *
1934  * Application performance when pre-faulting zero-fill pages is heavily
1935  * dependent on the application.  Very tiny applications like /bin/echo
1936  * lose a little performance while applications of any appreciable size
1937  * gain performance.  Prefaulting multiple pages also reduces SMP
1938  * congestion and can improve SMP performance significantly.
1939  *
1940  * NOTE!  prot may allow writing but this only applies to the top level
1941  *        object.  If we wind up mapping a page extracted from a backing
1942  *        object we have to make sure it is read-only.
1943  *
1944  * NOTE!  The caller has already handled any COW operations on the
1945  *        vm_map_entry via the normal fault code.  Do NOT call this
1946  *        shortcut unless the normal fault code has run on this entry.
1947  *
1948  * No other requirements.
1949  */
1950 #define PFBAK 4
1951 #define PFFOR 4
1952 #define PAGEORDER_SIZE (PFBAK+PFFOR)
1953
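/*
 * Byte offsets relative to the faulting address that vm_prefault() tries,
 * in order of increasing distance, alternating behind and ahead of the
 * fault address.
 */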
1954 static int vm_prefault_pageorder[] = {
1955         -PAGE_SIZE, PAGE_SIZE,
1956         -2 * PAGE_SIZE, 2 * PAGE_SIZE,
1957         -3 * PAGE_SIZE, 3 * PAGE_SIZE,
1958         -4 * PAGE_SIZE, 4 * PAGE_SIZE
1959 };
1960
1961 /*
1962  * Set PG_NOSYNC if the map entry indicates so, but only if the page
1963  * is not already dirty by other means.  This will prevent passive
1964  * filesystem syncing as well as 'sync' from writing out the page.
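      *
      * MAP_ENTRY_NOSYNC is set on the map entry by mmap(2)'s MAP_NOSYNC
      * flag or by madvise(2) with MADV_NOSYNC.  An illustrative userland
      * use (p, len and fd are placeholders):
      *
      *         p = mmap(NULL, len, PROT_READ | PROT_WRITE,
      *                  MAP_SHARED | MAP_NOSYNC, fd, 0);
      *
      * Pages dirtied through such a mapping are written back by msync(2)
      * or by the pageout daemon, but not by the periodic filesystem
      * syncer.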
1965  */
1966 static void
1967 vm_set_nosync(vm_page_t m, vm_map_entry_t entry)
1968 {
1969         if (entry->eflags & MAP_ENTRY_NOSYNC) {
1970                 if (m->dirty == 0)
1971                         vm_page_flag_set(m, PG_NOSYNC);
1972         } else {
1973                 vm_page_flag_clear(m, PG_NOSYNC);
1974         }
1975 }
1976
1977 static void
1978 vm_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry, int prot)
1979 {
1980         struct lwp *lp;
1981         vm_page_t m;
1982         vm_offset_t starta;
1983         vm_offset_t addr;
1984         vm_pindex_t index;
1985         vm_pindex_t pindex;
1986         vm_object_t object;
1987         int pprot;
1988         int i;
1989
1990         /*
1991          * We do not currently prefault mappings that use virtual page
1992          * tables.  We do not prefault foreign pmaps.
1993          */
1994         if (entry->maptype == VM_MAPTYPE_VPAGETABLE)
1995                 return;
1996         lp = curthread->td_lwp;
1997         if (lp == NULL || (pmap != vmspace_pmap(lp->lwp_vmspace)))
1998                 return;
1999
2000         object = entry->object.vm_object;
2001
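             /*
              * Consider a window of up to PFBAK pages behind and PFFOR pages
              * ahead of the faulting address.  Clamp the start to the map
              * entry and, if the subtraction below wrapped past zero (leaving
              * starta > addra), to 0.
              */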
2002         starta = addra - PFBAK * PAGE_SIZE;
2003         if (starta < entry->start)
2004                 starta = entry->start;
2005         else if (starta > addra)
2006                 starta = 0;
2007
2008         /*
2009          * Critical section protection is required to maintain the
2010          * page/object association; interrupts can free pages and remove
2011          * them from their objects.
2012          */
2013         crit_enter();
2014         lwkt_gettoken(&vm_token);
2015         for (i = 0; i < PAGEORDER_SIZE; i++) {
2016                 vm_object_t lobject;
2017                 int allocated = 0;
2018
2019                 addr = addra + vm_prefault_pageorder[i];
2020                 if (addr > addra + (PFFOR * PAGE_SIZE))
2021                         addr = 0;
2022
2023                 if (addr < starta || addr >= entry->end)
2024                         continue;
2025
2026                 if (pmap_prefault_ok(pmap, addr) == 0)
2027                         continue;
2028
2029                 /*
2030                  * Follow the VM object chain to obtain the page to be mapped
2031                  * into the pmap.
2032                  *
2033                  * If we reach the terminal object without finding a page
2034                  * and we determine it would be advantageous, then allocate
2035                  * a zero-fill page for the base object.  The base object
2036                  * is guaranteed to be OBJT_DEFAULT for this case.
2037                  *
2038                  * In order to not have to check the pager via *haspage*()
2039                  * we stop if any non-default object is encountered.  e.g.
2040                  * a vnode or swap object would stop the loop.
2041                  */
2042                 index = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
2043                 lobject = object;
2044                 pindex = index;
2045                 pprot = prot;
2046
2047                 while ((m = vm_page_lookup(lobject, pindex)) == NULL) {
2048                         if (lobject->type != OBJT_DEFAULT)
2049                                 break;
2050                         if (lobject->backing_object == NULL) {
2051                                 if (vm_fast_fault == 0)
2052                                         break;
2053                                 if (vm_prefault_pageorder[i] < 0 ||
2054                                     (prot & VM_PROT_WRITE) == 0 ||
2055                                     vm_page_count_min(0)) {
2056                                         break;
2057                                 }
2058                                 /* note: allocate from base object */
2059                                 m = vm_page_alloc(object, index,
2060                                               VM_ALLOC_NORMAL | VM_ALLOC_ZERO);
                                     if (m == NULL)
                                             break;
2061
2062                                 if ((m->flags & PG_ZERO) == 0) {
2063                                         vm_page_zero_fill(m);
2064                                 } else {
2065                                         vm_page_flag_clear(m, PG_ZERO);
2066                                         mycpu->gd_cnt.v_ozfod++;
2067                                 }
2068                                 mycpu->gd_cnt.v_zfod++;
2069                                 m->valid = VM_PAGE_BITS_ALL;
2070                                 allocated = 1;
2071                                 pprot = prot;
2072                                 /* lobject = object .. not needed */
2073                                 break;
2074                         }
2075                         if (lobject->backing_object_offset & PAGE_MASK)
2076                                 break;
2077                         pindex += lobject->backing_object_offset >> PAGE_SHIFT;
2078                         lobject = lobject->backing_object;
2079                         pprot &= ~VM_PROT_WRITE;
2080                 }
2081                 /*
2082                  * NOTE: lobject now invalid (if we did a zero-fill we didn't
2083                  *       bother assigning lobject = object).
2084                  *
2085                  * Give up if the page is not available.
2086                  */
2087                 if (m == NULL)
2088                         break;
2089
2090                 /*
2091                  * Do not conditionalize on PG_RAM.  If pages are present in
2092                  * the VM system we assume optimal caching.  If caching is
2093                  * not optimal the I/O gravy train will be restarted when we
2094                  * hit an unavailable page.  We do not want to try to restart
2095                  * the gravy train now because we really don't know how much
2096                  * of the object has been cached.  The cost for restarting
2097                  * the gravy train should be low (since accesses will likely
2098                  * be I/O bound anyway).
2099                  *
2100                  * The object must be marked dirty if we are mapping a
2101                  * writable page.
2102                  */
2103                 if (pprot & VM_PROT_WRITE)
2104                         vm_object_set_writeable_dirty(m->object);
2105
2106                 /*
2107                  * Enter the page into the pmap if appropriate.  If we had
2108                  * allocated the page we have to place it on a queue.  If not
2109                  * we just have to make sure it isn't on the cache queue
2110                  * (pages on the cache queue are not allowed to be mapped).
2111                  */
2112                 if (allocated) {
2113                         if (pprot & VM_PROT_WRITE)
2114                                 vm_set_nosync(m, entry);
2115                         pmap_enter(pmap, addr, m, pprot, 0);
2116                         vm_page_deactivate(m);
2117                         vm_page_wakeup(m);
2118                 } else if (((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
2119                     (m->busy == 0) &&
2120                     (m->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
2121
2122                         if ((m->queue - m->pc) == PQ_CACHE) {
2123                                 vm_page_deactivate(m);
2124                         }
2125                         vm_page_busy(m);
2126                         if (pprot & VM_PROT_WRITE)
2127                                 vm_set_nosync(m, entry);
2128                         pmap_enter(pmap, addr, m, pprot, 0);
2129                         vm_page_wakeup(m);
2130                 }
2131         }
2132         lwkt_reltoken(&vm_token);
2133         crit_exit();
2134 }