/*
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: @(#)vm_fault.c    8.4 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_fault.c,v 1.108.2.8 2002/02/26 05:49:27 silby Exp $
 * $DragonFly: src/sys/vm/vm_fault.c,v 1.46 2008/05/09 07:24:48 dillon Exp $
 */

/*
 *      Page fault handling module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <sys/vkernel.h>
#include <sys/sfbuf.h>
#include <sys/lock.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>
#include <vm/vm_page2.h>

#define VM_FAULT_READ_AHEAD 8
#define VM_FAULT_READ_BEHIND 7
#define VM_FAULT_READ (VM_FAULT_READ_AHEAD+VM_FAULT_READ_BEHIND+1)
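
/*
 * Note: the read-behind/read-ahead window above covers
 * VM_FAULT_READ_BEHIND + VM_FAULT_READ_AHEAD + 1 (the faulting page)
 * = 7 + 8 + 1 = 16 pages, which sizes the marray[] used by
 * vm_fault_object() when clustering pager input.
 */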

struct faultstate {
        vm_page_t m;
        vm_object_t object;
        vm_pindex_t pindex;
        vm_prot_t prot;
        vm_page_t first_m;
        vm_object_t first_object;
        vm_prot_t first_prot;
        vm_map_t map;
        vm_map_entry_t entry;
        int lookup_still_valid;
        int didlimit;
        int hardfault;
        int fault_flags;
        int map_generation;
        boolean_t wired;
        struct vnode *vp;
};

static int vm_fault_object(struct faultstate *, vm_pindex_t, vm_prot_t);
static int vm_fault_vpagetable(struct faultstate *, vm_pindex_t *, vpte_t, int);
static int vm_fault_additional_pages (vm_page_t, int, int, vm_page_t *, int *);
static int vm_fault_ratelimit(struct vmspace *);

static __inline void
release_page(struct faultstate *fs)
{
        vm_page_deactivate(fs->m);
        vm_page_wakeup(fs->m);
        fs->m = NULL;
}

static __inline void
unlock_map(struct faultstate *fs)
{
        if (fs->lookup_still_valid && fs->map) {
                vm_map_lookup_done(fs->map, fs->entry, 0);
                fs->lookup_still_valid = FALSE;
        }
}

/*
 * Clean up after a successful call to vm_fault_object() so another call
 * to vm_fault_object() can be made.
 */
static void
_cleanup_successful_fault(struct faultstate *fs, int relock)
{
        if (fs->object != fs->first_object) {
                vm_page_free(fs->first_m);
                vm_object_pip_wakeup(fs->object);
                fs->first_m = NULL;
        }
        fs->object = fs->first_object;
        if (relock && fs->lookup_still_valid == FALSE) {
                if (fs->map)
                        vm_map_lock_read(fs->map);
                fs->lookup_still_valid = TRUE;
        }
}

static void
_unlock_things(struct faultstate *fs, int dealloc)
{
        vm_object_pip_wakeup(fs->first_object);
        _cleanup_successful_fault(fs, 0);
        if (dealloc) {
                vm_object_deallocate(fs->first_object);
        }
        unlock_map(fs);
        if (fs->vp != NULL) {
                vput(fs->vp);
                fs->vp = NULL;
        }
}

#define unlock_things(fs) _unlock_things(fs, 0)
#define unlock_and_deallocate(fs) _unlock_things(fs, 1)
#define cleanup_successful_fault(fs) _cleanup_successful_fault(fs, 1)

/*
 * TRYPAGER
 *
 * Determine if the pager for the current object *might* contain the page.
 *
 * We only need to try the pager if this is not a default object (default
 * objects are zero-fill and have no real pager), and if we are not taking
 * a wiring fault or if the FS entry is wired.
 */
#define TRYPAGER(fs)    \
                (fs->object->type != OBJT_DEFAULT && \
                (((fs->fault_flags & VM_FAULT_WIRE_MASK) == 0) || fs->wired))

/*
 * vm_fault:
 *
 * Handle a page fault occurring at the given address, requiring the given
 * permissions, in the map specified.  If successful, the page is inserted
 * into the associated physical map.
 *
 * NOTE: The given address should be truncated to the proper page address.
 *
 * KERN_SUCCESS is returned if the page fault is handled; otherwise,
 * a standard error specifying why the fault is fatal is returned.
 *
 * The map in question must be referenced, and remains so.
 * The caller may hold no locks.
 */
int
vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type, int fault_flags)
{
        int result;
        vm_pindex_t first_pindex;
        struct faultstate fs;

        mycpu->gd_cnt.v_vm_faults++;

        fs.didlimit = 0;
        fs.hardfault = 0;
        fs.fault_flags = fault_flags;

RetryFault:
        /*
         * Find the vm_map_entry representing the backing store and resolve
         * the top level object and page index.  This may have the side
         * effect of executing a copy-on-write on the map entry and/or
         * creating a shadow object, but will not COW any actual VM pages.
         *
         * On success fs.map is left read-locked and various other fields
         * are initialized but not otherwise referenced or locked.
         *
         * NOTE!  vm_map_lookup will try to upgrade the fault_type to
         * VM_FAULT_WRITE if the map entry is a virtual page table and also
         * writable, so we can set the 'A'ccessed bit in the virtual page
         * table entry.
         */
        fs.map = map;
        result = vm_map_lookup(&fs.map, vaddr, fault_type,
                               &fs.entry, &fs.first_object,
                               &first_pindex, &fs.first_prot, &fs.wired);

        /*
         * If the lookup failed or the map protections are incompatible,
         * the fault generally fails.  However, if the caller is trying
         * to do a user wiring we have more work to do.
         */
        if (result != KERN_SUCCESS) {
                if (result != KERN_PROTECTION_FAILURE)
                        return result;
                if ((fs.fault_flags & VM_FAULT_WIRE_MASK) != VM_FAULT_USER_WIRE)
                        return result;

                /*
                 * If we are user-wiring a r/w segment, and it is COW, then
                 * we need to do the COW operation.  Note that we don't
                 * currently COW RO sections now, because it is NOT desirable
                 * to COW .text.  We simply keep .text from ever being COW'ed
                 * and take the heat that one cannot debug wired .text sections.
                 */
                result = vm_map_lookup(&fs.map, vaddr,
                                       VM_PROT_READ|VM_PROT_WRITE|
                                        VM_PROT_OVERRIDE_WRITE,
                                       &fs.entry, &fs.first_object,
                                       &first_pindex, &fs.first_prot,
                                       &fs.wired);
                if (result != KERN_SUCCESS)
                        return result;

                /*
                 * If we don't COW now, on a user wire, the user will never
                 * be able to write to the mapping.  If we don't make this
                 * restriction, the bookkeeping would be nearly impossible.
                 */
                if ((fs.entry->protection & VM_PROT_WRITE) == 0)
                        fs.entry->max_protection &= ~VM_PROT_WRITE;
        }

        /*
         * fs.map is read-locked
         *
         * Misc checks.  Save the map generation number to detect races.
         */
        fs.map_generation = fs.map->timestamp;

        if (fs.entry->eflags & MAP_ENTRY_NOFAULT) {
                panic("vm_fault: fault on nofault entry, addr: %lx",
                    (u_long)vaddr);
        }

        /*
         * A system map entry may return a NULL object.  No object means
         * no pager means an unrecoverable kernel fault.
         */
        if (fs.first_object == NULL) {
                panic("vm_fault: unrecoverable fault at %p in entry %p",
                        (void *)vaddr, fs.entry);
        }

        /*
         * Make a reference to this object to prevent its disposal while we
         * are messing with it.  Once we have the reference, the map is free
         * to be diddled.  Since objects reference their shadows (and copies),
         * they will stay around as well.
         *
         * Bump the paging-in-progress count to prevent size changes (e.g.
         * truncation operations) during I/O.  This must be done after
         * obtaining the vnode lock in order to avoid possible deadlocks.
         */
        vm_object_reference(fs.first_object);
        fs.vp = vnode_pager_lock(fs.first_object);
        vm_object_pip_add(fs.first_object, 1);

        fs.lookup_still_valid = TRUE;
        fs.first_m = NULL;
        fs.object = fs.first_object;    /* so unlock_and_deallocate works */

        /*
         * If the entry is wired we cannot change the page protection.
         */
        if (fs.wired)
                fault_type = fs.first_prot;

        /*
         * The page we want is at (first_object, first_pindex), but if the
         * vm_map_entry is VM_MAPTYPE_VPAGETABLE we have to traverse the
         * page table to figure out the actual pindex.
         *
         * NOTE!  DEVELOPMENT IN PROGRESS, THIS IS AN INITIAL IMPLEMENTATION
         * ONLY
         */
        if (fs.entry->maptype == VM_MAPTYPE_VPAGETABLE) {
                result = vm_fault_vpagetable(&fs, &first_pindex,
                                             fs.entry->aux.master_pde,
                                             fault_type);
                if (result == KERN_TRY_AGAIN)
                        goto RetryFault;
                if (result != KERN_SUCCESS)
                        return (result);
        }

        /*
         * Now we have the actual (object, pindex), fault in the page.  If
         * vm_fault_object() fails it will unlock and deallocate the FS
         * data.  If it succeeds everything remains locked and fs->object
         * will have an additional PIP count if it is not equal to
         * fs->first_object.
         *
         * vm_fault_object will set fs->prot for the pmap operation.  It is
         * allowed to set VM_PROT_WRITE even if fault_type == VM_PROT_READ,
         * provided the page can be safely written.  However, it will force
         * a read-only mapping for a read fault if the memory is managed by
         * a virtual page table.
         */
        result = vm_fault_object(&fs, first_pindex, fault_type);

        if (result == KERN_TRY_AGAIN)
                goto RetryFault;
        if (result != KERN_SUCCESS)
                return (result);

        /*
         * On success vm_fault_object() does not unlock or deallocate, and fs.m
         * will contain a busied page.
         *
         * Enter the page into the pmap and do pmap-related adjustments.
         */
        unlock_things(&fs);
        pmap_enter(fs.map->pmap, vaddr, fs.m, fs.prot, fs.wired);

        if (((fs.fault_flags & VM_FAULT_WIRE_MASK) == 0) && (fs.wired == 0)) {
                pmap_prefault(fs.map->pmap, vaddr, fs.entry);
        }

        vm_page_flag_clear(fs.m, PG_ZERO);
        vm_page_flag_set(fs.m, PG_REFERENCED);

        /*
         * If the page is not wired down, then put it where the pageout daemon
         * can find it.
         */
        if (fs.fault_flags & VM_FAULT_WIRE_MASK) {
                if (fs.wired)
                        vm_page_wire(fs.m);
                else
                        vm_page_unwire(fs.m, 1);
        } else {
                vm_page_activate(fs.m);
        }

        if (curthread->td_lwp) {
                if (fs.hardfault) {
                        curthread->td_lwp->lwp_ru.ru_majflt++;
                } else {
                        curthread->td_lwp->lwp_ru.ru_minflt++;
                }
        }

        /*
         * Unlock everything, and return
         */
        vm_page_wakeup(fs.m);
        vm_object_deallocate(fs.first_object);

        return (KERN_SUCCESS);
}
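
/*
 * Illustrative sketch (not part of the original file): a machine-dependent
 * trap handler typically resolves a user-mode page fault along these lines,
 * truncating the address to a page boundary and converting failure into a
 * signal.  The 'lp', 'write', and 'fault_addr' names are hypothetical:
 *
 *      vm_offset_t va = trunc_page(fault_addr);
 *      int rv = vm_fault(&lp->lwp_vmspace->vm_map, va,
 *                        write ? VM_PROT_WRITE : VM_PROT_READ,
 *                        VM_FAULT_NORMAL);
 *      if (rv != KERN_SUCCESS)
 *              deliver SIGSEGV/SIGBUS to the process (MD-specific)
 */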

/*
 * Fault in the specified virtual address in the current process map,
 * returning a held VM page or NULL.  See vm_fault_page() for more
 * information.
 */
vm_page_t
vm_fault_page_quick(vm_offset_t va, vm_prot_t fault_type, int *errorp)
{
        struct lwp *lp = curthread->td_lwp;
        vm_page_t m;

        m = vm_fault_page(&lp->lwp_vmspace->vm_map, va,
                          fault_type, VM_FAULT_NORMAL, errorp);
        return(m);
}

/*
 * Fault in the specified virtual address in the specified map, doing all
 * necessary manipulation of the object store and all necessary I/O.  Return
 * a held VM page or NULL, and set *errorp.  The related pmap is not
 * updated.
 *
 * The returned page will be properly dirtied if VM_PROT_WRITE was specified,
 * and marked PG_REFERENCED as well.
 *
 * If the page cannot be faulted writable and VM_PROT_WRITE was specified, an
 * error will be returned.
 */
vm_page_t
vm_fault_page(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
              int fault_flags, int *errorp)
{
        vm_pindex_t first_pindex;
        struct faultstate fs;
        int result;
        vm_prot_t orig_fault_type = fault_type;

        mycpu->gd_cnt.v_vm_faults++;

        fs.didlimit = 0;
        fs.hardfault = 0;
        fs.fault_flags = fault_flags;
        KKASSERT((fault_flags & VM_FAULT_WIRE_MASK) == 0);

RetryFault:
        /*
         * Find the vm_map_entry representing the backing store and resolve
         * the top level object and page index.  This may have the side
         * effect of executing a copy-on-write on the map entry and/or
         * creating a shadow object, but will not COW any actual VM pages.
         *
         * On success fs.map is left read-locked and various other fields
         * are initialized but not otherwise referenced or locked.
         *
         * NOTE!  vm_map_lookup will upgrade the fault_type to VM_FAULT_WRITE
         * if the map entry is a virtual page table and also writable,
         * so we can set the 'A'ccessed bit in the virtual page table entry.
         */
        fs.map = map;
        result = vm_map_lookup(&fs.map, vaddr, fault_type,
                               &fs.entry, &fs.first_object,
                               &first_pindex, &fs.first_prot, &fs.wired);

        if (result != KERN_SUCCESS) {
                *errorp = result;
                return (NULL);
        }

        /*
         * fs.map is read-locked
         *
         * Misc checks.  Save the map generation number to detect races.
         */
        fs.map_generation = fs.map->timestamp;

        if (fs.entry->eflags & MAP_ENTRY_NOFAULT) {
                panic("vm_fault: fault on nofault entry, addr: %lx",
                    (u_long)vaddr);
        }

        /*
         * A system map entry may return a NULL object.  No object means
         * no pager means an unrecoverable kernel fault.
         */
        if (fs.first_object == NULL) {
                panic("vm_fault: unrecoverable fault at %p in entry %p",
                        (void *)vaddr, fs.entry);
        }

        /*
         * Make a reference to this object to prevent its disposal while we
         * are messing with it.  Once we have the reference, the map is free
         * to be diddled.  Since objects reference their shadows (and copies),
         * they will stay around as well.
         *
         * Bump the paging-in-progress count to prevent size changes (e.g.
         * truncation operations) during I/O.  This must be done after
         * obtaining the vnode lock in order to avoid possible deadlocks.
         */
        vm_object_reference(fs.first_object);
        fs.vp = vnode_pager_lock(fs.first_object);
        vm_object_pip_add(fs.first_object, 1);

        fs.lookup_still_valid = TRUE;
        fs.first_m = NULL;
        fs.object = fs.first_object;    /* so unlock_and_deallocate works */

        /*
         * If the entry is wired we cannot change the page protection.
         */
        if (fs.wired)
                fault_type = fs.first_prot;

        /*
         * The page we want is at (first_object, first_pindex), but if the
         * vm_map_entry is VM_MAPTYPE_VPAGETABLE we have to traverse the
         * page table to figure out the actual pindex.
         *
         * NOTE!  DEVELOPMENT IN PROGRESS, THIS IS AN INITIAL IMPLEMENTATION
         * ONLY
         */
        if (fs.entry->maptype == VM_MAPTYPE_VPAGETABLE) {
                result = vm_fault_vpagetable(&fs, &first_pindex,
                                             fs.entry->aux.master_pde,
                                             fault_type);
                if (result == KERN_TRY_AGAIN)
                        goto RetryFault;
                if (result != KERN_SUCCESS) {
                        *errorp = result;
                        return (NULL);
                }
        }

        /*
         * Now we have the actual (object, pindex), fault in the page.  If
         * vm_fault_object() fails it will unlock and deallocate the FS
         * data.  If it succeeds everything remains locked and fs->object
         * will have an additional PIP count if it is not equal to
         * fs->first_object.
         */
        result = vm_fault_object(&fs, first_pindex, fault_type);

        if (result == KERN_TRY_AGAIN)
                goto RetryFault;
        if (result != KERN_SUCCESS) {
                *errorp = result;
                return(NULL);
        }

        if ((orig_fault_type & VM_PROT_WRITE) &&
            (fs.prot & VM_PROT_WRITE) == 0) {
                *errorp = KERN_PROTECTION_FAILURE;
                unlock_and_deallocate(&fs);
                return(NULL);
        }

        /*
         * On success vm_fault_object() does not unlock or deallocate, and fs.m
         * will contain a busied page.
         */
        unlock_things(&fs);

        /*
         * Return a held page.  We are not doing any pmap manipulation so do
         * not set PG_MAPPED.  However, adjust the page flags according to
         * the fault type because the caller may not use a managed pmapping
         * (so we don't want to lose the fact that the page will be dirtied
         * if a write fault was specified).
         */
        vm_page_hold(fs.m);
        vm_page_flag_clear(fs.m, PG_ZERO);
        if (fault_type & VM_PROT_WRITE)
                vm_page_dirty(fs.m);

        /*
         * Update the pmap.  We really only have to do this if a COW
         * occurred to replace the read-only page with the new page.  For
         * now just do it unconditionally. XXX
         */
        pmap_enter(fs.map->pmap, vaddr, fs.m, fs.prot, fs.wired);
        vm_page_flag_set(fs.m, PG_REFERENCED);

        /*
         * Unbusy the page by activating it.  It remains held and will not
         * be reclaimed.
         */
        vm_page_activate(fs.m);

        if (curthread->td_lwp) {
                if (fs.hardfault) {
                        curthread->td_lwp->lwp_ru.ru_majflt++;
                } else {
                        curthread->td_lwp->lwp_ru.ru_minflt++;
                }
        }

        /*
         * Unlock everything, and return the held page.
         */
        vm_page_wakeup(fs.m);
        vm_object_deallocate(fs.first_object);

        *errorp = 0;
        return(fs.m);
}
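
/*
 * Illustrative sketch (not part of the original file): a caller that needs
 * direct access to the backing page, e.g. to copy data without going
 * through the user pmap, might use vm_fault_page() like this; 'vm' and
 * 'uva' are hypothetical.  vm_page_unhold() drops the hold taken above:
 *
 *      int error;
 *      vm_page_t m;
 *
 *      m = vm_fault_page(&vm->vm_map, trunc_page(uva), VM_PROT_READ,
 *                        VM_FAULT_NORMAL, &error);
 *      if (m) {
 *              ... access the page contents, e.g. via an sf_buf ...
 *              vm_page_unhold(m);
 *      }
 */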

/*
 * Fault in the specified (object,offset), dirtying the returned page as
 * needed.  If the requested fault_type cannot be satisfied, NULL is
 * returned and an error is set in *errorp.
 */
vm_page_t
vm_fault_object_page(vm_object_t object, vm_ooffset_t offset,
                     vm_prot_t fault_type, int fault_flags, int *errorp)
{
        int result;
        vm_pindex_t first_pindex;
        struct faultstate fs;
        struct vm_map_entry entry;

        bzero(&entry, sizeof(entry));
        entry.object.vm_object = object;
        entry.maptype = VM_MAPTYPE_NORMAL;
        entry.protection = entry.max_protection = fault_type;

        fs.didlimit = 0;
        fs.hardfault = 0;
        fs.fault_flags = fault_flags;
        fs.map = NULL;
        KKASSERT((fault_flags & VM_FAULT_WIRE_MASK) == 0);

RetryFault:
        fs.first_object = object;
        first_pindex = OFF_TO_IDX(offset);
        fs.entry = &entry;
        fs.first_prot = fault_type;
        fs.wired = 0;
        /*fs.map_generation = 0; unused */

        /*
         * Make a reference to this object to prevent its disposal while we
         * are messing with it.  Once we have the reference, the map is free
         * to be diddled.  Since objects reference their shadows (and copies),
         * they will stay around as well.
         *
         * Bump the paging-in-progress count to prevent size changes (e.g.
         * truncation operations) during I/O.  This must be done after
         * obtaining the vnode lock in order to avoid possible deadlocks.
         */
        vm_object_reference(fs.first_object);
        fs.vp = vnode_pager_lock(fs.first_object);
        vm_object_pip_add(fs.first_object, 1);

        fs.lookup_still_valid = TRUE;
        fs.first_m = NULL;
        fs.object = fs.first_object;    /* so unlock_and_deallocate works */

#if 0
        /* XXX future - ability to operate on VM object using vpagetable */
        if (fs.entry->maptype == VM_MAPTYPE_VPAGETABLE) {
                result = vm_fault_vpagetable(&fs, &first_pindex,
                                             fs.entry->aux.master_pde,
                                             fault_type);
                if (result == KERN_TRY_AGAIN)
                        goto RetryFault;
                if (result != KERN_SUCCESS) {
                        *errorp = result;
                        return (NULL);
                }
        }
#endif

        /*
         * Now we have the actual (object, pindex), fault in the page.  If
         * vm_fault_object() fails it will unlock and deallocate the FS
         * data.  If it succeeds everything remains locked and fs->object
         * will have an additional PIP count if it is not equal to
         * fs->first_object.
         */
        result = vm_fault_object(&fs, first_pindex, fault_type);

        if (result == KERN_TRY_AGAIN)
                goto RetryFault;
        if (result != KERN_SUCCESS) {
                *errorp = result;
                return(NULL);
        }

        if ((fault_type & VM_PROT_WRITE) && (fs.prot & VM_PROT_WRITE) == 0) {
                *errorp = KERN_PROTECTION_FAILURE;
                unlock_and_deallocate(&fs);
                return(NULL);
        }

        /*
         * On success vm_fault_object() does not unlock or deallocate, and fs.m
         * will contain a busied page.
         */
        unlock_things(&fs);

        /*
         * Return a held page.  We are not doing any pmap manipulation so do
         * not set PG_MAPPED.  However, adjust the page flags according to
         * the fault type because the caller may not use a managed pmapping
         * (so we don't want to lose the fact that the page will be dirtied
         * if a write fault was specified).
         */
        vm_page_hold(fs.m);
        vm_page_flag_clear(fs.m, PG_ZERO);
        if (fault_type & VM_PROT_WRITE)
                vm_page_dirty(fs.m);

        /*
         * Indicate that the page was accessed.
         */
        vm_page_flag_set(fs.m, PG_REFERENCED);

        /*
         * Unbusy the page by activating it.  It remains held and will not
         * be reclaimed.
         */
        vm_page_activate(fs.m);

        if (curthread->td_lwp) {
                if (fs.hardfault) {
                        mycpu->gd_cnt.v_vm_faults++;
                        curthread->td_lwp->lwp_ru.ru_majflt++;
                } else {
                        curthread->td_lwp->lwp_ru.ru_minflt++;
                }
        }

        /*
         * Unlock everything, and return the held page.
         */
        vm_page_wakeup(fs.m);
        vm_object_deallocate(fs.first_object);

        *errorp = 0;
        return(fs.m);
}
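
/*
 * Illustrative sketch (not part of the original file): unlike
 * vm_fault_page(), this form operates on a VM object directly with no map
 * lookup, e.g. to populate one page of a swap-backed object at a byte
 * offset; 'obj' and 'idx' are hypothetical:
 *
 *      int error;
 *      vm_page_t m;
 *
 *      m = vm_fault_object_page(obj, (vm_ooffset_t)idx << PAGE_SHIFT,
 *                               VM_PROT_READ|VM_PROT_WRITE,
 *                               VM_FAULT_NORMAL, &error);
 *      if (m)
 *              vm_page_unhold(m);      release the hold when done
 */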

/*
 * Translate the virtual page number (first_pindex) that is relative
 * to the address space into a logical page number that is relative to the
 * backing object.  Use the virtual page table pointed to by (vpte).
 *
 * This implements an N-level page table.  Any level can terminate the
 * scan by setting VPTE_PS.  A linear mapping is accomplished by setting
 * VPTE_PS in the master page directory entry set via mcontrol(MADV_SETMAP).
 */
static
int
vm_fault_vpagetable(struct faultstate *fs, vm_pindex_t *pindex,
                    vpte_t vpte, int fault_type)
{
        struct sf_buf *sf;
        int vshift = 32 - PAGE_SHIFT;   /* page index bits remaining */
        int result = KERN_SUCCESS;
        vpte_t *ptep;

        for (;;) {
                /*
                 * We cannot proceed if the vpte is not valid, not readable
                 * for a read fault, or not writable for a write fault.
                 */
                if ((vpte & VPTE_V) == 0) {
                        unlock_and_deallocate(fs);
                        return (KERN_FAILURE);
                }
                if ((fault_type & VM_PROT_READ) && (vpte & VPTE_R) == 0) {
                        unlock_and_deallocate(fs);
                        return (KERN_FAILURE);
                }
                if ((fault_type & VM_PROT_WRITE) && (vpte & VPTE_W) == 0) {
                        unlock_and_deallocate(fs);
                        return (KERN_FAILURE);
                }
                if ((vpte & VPTE_PS) || vshift == 0)
                        break;
                KKASSERT(vshift >= VPTE_PAGE_BITS);

                /*
                 * Get the page table page.  Nominally we only read the page
                 * table, but since we are actively setting VPTE_M and VPTE_A,
                 * tell vm_fault_object() that we are writing it.
                 *
                 * There is currently no real need to optimize this.
                 */
                result = vm_fault_object(fs, vpte >> PAGE_SHIFT,
                                         VM_PROT_READ|VM_PROT_WRITE);
                if (result != KERN_SUCCESS)
                        return (result);

                /*
                 * Process the returned fs.m and look up the page table
                 * entry in the page table page.
                 */
                vshift -= VPTE_PAGE_BITS;
                sf = sf_buf_alloc(fs->m, SFB_CPUPRIVATE);
                ptep = ((vpte_t *)sf_buf_kva(sf) +
                        ((*pindex >> vshift) & VPTE_PAGE_MASK));
                vpte = *ptep;

                /*
                 * Page table write-back.  If the vpte is valid for the
                 * requested operation, do a write-back to the page table.
                 *
                 * XXX VPTE_M is not set properly for page directory pages.
                 * It doesn't get set in the page directory if the page table
                 * is modified during a read access.
                 */
                if ((fault_type & VM_PROT_WRITE) && (vpte & VPTE_V) &&
                    (vpte & VPTE_W)) {
                        if ((vpte & (VPTE_M|VPTE_A)) != (VPTE_M|VPTE_A)) {
                                atomic_set_int(ptep, VPTE_M|VPTE_A);
                                vm_page_dirty(fs->m);
                        }
                }
                if ((fault_type & VM_PROT_READ) && (vpte & VPTE_V) &&
                    (vpte & VPTE_R)) {
                        if ((vpte & VPTE_A) == 0) {
                                atomic_set_int(ptep, VPTE_A);
                                vm_page_dirty(fs->m);
                        }
                }
                sf_buf_free(sf);
                vm_page_flag_set(fs->m, PG_REFERENCED);
                vm_page_activate(fs->m);
                vm_page_wakeup(fs->m);
                cleanup_successful_fault(fs);
        }

        /*
         * Combine remaining address bits with the vpte.
         */
        *pindex = (vpte >> PAGE_SHIFT) +
                  (*pindex & ((1 << vshift) - 1));
        return (KERN_SUCCESS);
}
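
/*
 * Worked example (illustrative, assuming PAGE_SHIFT = 12 and
 * VPTE_PAGE_BITS = 10): vshift starts at 32 - 12 = 20 page-index bits.
 * Each level consumes 10 bits, so after one level vshift is 10 and after
 * two levels it is 0, terminating the scan.  The final combination
 *
 *      (vpte >> 12) + (*pindex & ((1 << vshift) - 1))
 *
 * yields the page frame taken from the terminal vpte plus the
 * untranslated low-order index bits, which are non-zero only when
 * VPTE_PS terminated the scan early (e.g. the linear-mapping case).
 */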

/*
 * Do all operations required to fault-in (fs.first_object, pindex).  Run
 * through the shadow chain as necessary and do required COW or virtual
 * copy operations.  The caller has already fully resolved the vm_map_entry
 * and, if appropriate, has created a copy-on-write layer.  All we need to
 * do is iterate the object chain.
 *
 * On failure (fs) is unlocked and deallocated and the caller may return or
 * retry depending on the failure code.  On success (fs) is NOT unlocked or
 * deallocated, fs.m will contain a resolved, busied page, and fs.object
 * will have an additional PIP count if it is not equal to fs.first_object.
 */
static
int
vm_fault_object(struct faultstate *fs,
                vm_pindex_t first_pindex, vm_prot_t fault_type)
{
        vm_object_t next_object;
        vm_page_t marray[VM_FAULT_READ];
        vm_pindex_t pindex;
        int faultcount;

        fs->prot = fs->first_prot;
        fs->object = fs->first_object;
        pindex = first_pindex;

        /*
         * If a read fault occurs we try to make the page writable if
         * possible.  There are three cases where we cannot make the
         * page mapping writable:
         *
         * (1) The mapping is read-only or the VM object is read-only,
         *     fs->prot above will simply not have VM_PROT_WRITE set.
         *
         * (2) If the mapping is a virtual page table we need to be able
         *     to detect writes so we can set VPTE_M in the virtual page
         *     table.
         *
         * (3) If the VM page is read-only or copy-on-write, upgrading would
         *     just result in an unnecessary COW fault.
         *
         * VM_PROT_VPAGED is set if faulting via a virtual page table and
         * causes adjustments to the 'M'odify bit to also turn off write
         * access to force a re-fault.
         */
        if (fs->entry->maptype == VM_MAPTYPE_VPAGETABLE) {
                if ((fault_type & VM_PROT_WRITE) == 0)
                        fs->prot &= ~VM_PROT_WRITE;
        }

        for (;;) {
                /*
                 * If the object is dead, we stop here
                 */
                if (fs->object->flags & OBJ_DEAD) {
                        unlock_and_deallocate(fs);
                        return (KERN_PROTECTION_FAILURE);
                }

                /*
                 * See if page is resident.  spl protection is required
                 * to avoid an interrupt unbusy/free race against our
                 * lookup.  We must hold the protection through a page
                 * allocation or busy.
                 */
                crit_enter();
                fs->m = vm_page_lookup(fs->object, pindex);
                if (fs->m != NULL) {
                        int queue;
                        /*
                         * Wait/Retry if the page is busy.  We have to do this
                         * if the page is busy via either PG_BUSY or
                         * vm_page_t->busy because the vm_pager may be using
                         * vm_page_t->busy for pageouts ( and even pageins if
                         * it is the vnode pager ), and we could end up trying
                         * to pagein and pageout the same page simultaneously.
                         *
                         * We can theoretically allow the busy case on a read
                         * fault if the page is marked valid, but since such
                         * pages are typically already pmap'd, putting that
                         * special case in might be more effort than it is
                         * worth.  We cannot under any circumstances mess
                         * around with a vm_page_t->busy page except, perhaps,
                         * to pmap it.
                         */
                        if ((fs->m->flags & PG_BUSY) || fs->m->busy) {
                                unlock_things(fs);
                                vm_page_sleep_busy(fs->m, TRUE, "vmpfw");
                                mycpu->gd_cnt.v_intrans++;
                                vm_object_deallocate(fs->first_object);
                                crit_exit();
                                return (KERN_TRY_AGAIN);
                        }

                        /*
                         * If reactivating a page from PQ_CACHE we may have
                         * to rate-limit.
                         */
                        queue = fs->m->queue;
                        vm_page_unqueue_nowakeup(fs->m);

                        if ((queue - fs->m->pc) == PQ_CACHE &&
                            vm_page_count_severe()) {
                                vm_page_activate(fs->m);
                                unlock_and_deallocate(fs);
                                vm_waitpfault();
                                crit_exit();
                                return (KERN_TRY_AGAIN);
                        }

                        /*
                         * Mark page busy for other processes, and the
                         * pagedaemon.  If it still isn't completely valid
                         * (readable), jump to readrest, else we found the
                         * page and can return.
                         *
                         * We can release the spl once we have marked the
                         * page busy.
                         */
                        vm_page_busy(fs->m);
                        crit_exit();

                        if (((fs->m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) &&
                            fs->m->object != &kernel_object) {
                                goto readrest;
                        }
                        break; /* break to PAGE HAS BEEN FOUND */
                }

                /*
                 * Page is not resident.  If this is the search termination
                 * or the pager might contain the page, allocate a new page.
                 *
                 * NOTE: We are still in a critical section.
                 */
                if (TRYPAGER(fs) || fs->object == fs->first_object) {
                        /*
                         * If the page is beyond the object size we fail
                         */
                        if (pindex >= fs->object->size) {
                                crit_exit();
                                unlock_and_deallocate(fs);
                                return (KERN_PROTECTION_FAILURE);
                        }

                        /*
                         * Ratelimit.
                         */
                        if (fs->didlimit == 0 && curproc != NULL) {
                                int limticks;

                                limticks = vm_fault_ratelimit(curproc->p_vmspace);
                                if (limticks) {
                                        crit_exit();
                                        unlock_and_deallocate(fs);
                                        tsleep(curproc, 0, "vmrate", limticks);
                                        fs->didlimit = 1;
                                        return (KERN_TRY_AGAIN);
                                }
                        }

                        /*
                         * Allocate a new page for this object/offset pair.
                         */
                        fs->m = NULL;
                        if (!vm_page_count_severe()) {
                                fs->m = vm_page_alloc(fs->object, pindex,
                                    (fs->vp || fs->object->backing_object) ?
                                     VM_ALLOC_NORMAL :
                                     VM_ALLOC_NORMAL | VM_ALLOC_ZERO);
                        }
                        if (fs->m == NULL) {
                                crit_exit();
                                unlock_and_deallocate(fs);
                                vm_waitpfault();
                                return (KERN_TRY_AGAIN);
                        }
                }
                crit_exit();

readrest:
                /*
                 * We have found a valid page or we have allocated a new page.
                 * The page thus may not be valid or may not be entirely
                 * valid.
                 *
                 * Attempt to fault-in the page if there is a chance that the
                 * pager has it, and potentially fault in additional pages
                 * at the same time.
                 *
                 * We are NOT in splvm here and if TRYPAGER is true then
                 * fs.m will be non-NULL and will be PG_BUSY for us.
                 */

                if (TRYPAGER(fs)) {
                        int rv;
                        int reqpage;
                        int ahead, behind;
                        u_char behavior = vm_map_entry_behavior(fs->entry);

                        if (behavior == MAP_ENTRY_BEHAV_RANDOM) {
                                ahead = 0;
                                behind = 0;
                        } else {
                                behind = pindex;
                                if (behind > VM_FAULT_READ_BEHIND)
                                        behind = VM_FAULT_READ_BEHIND;

                                ahead = fs->object->size - pindex;
                                if (ahead < 1)
                                        ahead = 1;
                                if (ahead > VM_FAULT_READ_AHEAD)
                                        ahead = VM_FAULT_READ_AHEAD;
                        }

                        if ((fs->first_object->type != OBJT_DEVICE) &&
                            (behavior == MAP_ENTRY_BEHAV_SEQUENTIAL ||
                                (behavior != MAP_ENTRY_BEHAV_RANDOM &&
                                pindex >= fs->entry->lastr &&
                                pindex < fs->entry->lastr + VM_FAULT_READ))
                        ) {
                                vm_pindex_t firstpindex, tmppindex;

                                if (first_pindex < 2 * VM_FAULT_READ)
                                        firstpindex = 0;
                                else
                                        firstpindex = first_pindex - 2 * VM_FAULT_READ;

                                /*
                                 * note: partially valid pages cannot be
                                 * included in the lookahead - NFS piecemeal
                                 * writes will barf on it badly.
                                 *
                                 * spl protection is required to avoid races
                                 * between the lookup and an interrupt
                                 * unbusy/free sequence occurring prior to
                                 * our busy check.
                                 */
                                crit_enter();
                                for (tmppindex = first_pindex - 1;
                                    tmppindex >= firstpindex;
                                    --tmppindex
                                ) {
                                        vm_page_t mt;

                                        mt = vm_page_lookup(fs->first_object, tmppindex);
                                        if (mt == NULL || (mt->valid != VM_PAGE_BITS_ALL))
                                                break;
                                        if (mt->busy ||
                                                (mt->flags & (PG_BUSY | PG_FICTITIOUS | PG_UNMANAGED)) ||
                                                mt->hold_count ||
                                                mt->wire_count)
                                                continue;
                                        if (mt->dirty == 0)
                                                vm_page_test_dirty(mt);
                                        if (mt->dirty) {
                                                vm_page_busy(mt);
                                                vm_page_protect(mt, VM_PROT_NONE);
                                                vm_page_deactivate(mt);
                                                vm_page_wakeup(mt);
                                        } else {
                                                vm_page_cache(mt);
                                        }
                                }
                                crit_exit();

                                ahead += behind;
                                behind = 0;
                        }

                        /*
                         * Now we find out if any other pages should be paged
                         * in at this time.  This routine checks to see if the
                         * pages surrounding this fault reside in the same
                         * object as the page for this fault.  If they do,
                         * then they are faulted in also into the object.  The
                         * array "marray" returned contains an array of
                         * vm_page_t structs where one of them is the
                         * vm_page_t passed to the routine.  The reqpage
                         * return value is the index into the marray for the
                         * vm_page_t passed to the routine.
                         *
                         * fs.m plus the additional pages are PG_BUSY'd.
                         */
                        faultcount = vm_fault_additional_pages(
                            fs->m, behind, ahead, marray, &reqpage);

                        /*
                         * update lastr imperfectly (we do not know how much
                         * getpages will actually read), but good enough.
                         */
                        fs->entry->lastr = pindex + faultcount - behind;

                        /*
                         * Call the pager to retrieve the data, if any, after
                         * releasing the lock on the map.  We hold a ref on
                         * fs.object and the pages are PG_BUSY'd.
                         */
                        unlock_map(fs);

                        if (faultcount) {
                                rv = vm_pager_get_pages(fs->object, marray,
                                                        faultcount, reqpage);
                        } else {
                                rv = VM_PAGER_FAIL;
                        }

                        if (rv == VM_PAGER_OK) {
                                /*
                                 * Found the page. Leave it busy while we play
                                 * with it.
                                 */

                                /*
                                 * Relookup in case pager changed page. Pager
                                 * is responsible for disposition of old page
                                 * if moved.
                                 *
                                 * XXX other code segments do relookups too.
                                 * It's a bad abstraction that needs to be
                                 * fixed/removed.
                                 */
                                fs->m = vm_page_lookup(fs->object, pindex);
                                if (fs->m == NULL) {
                                        unlock_and_deallocate(fs);
                                        return (KERN_TRY_AGAIN);
                                }

                                ++fs->hardfault;
                                break; /* break to PAGE HAS BEEN FOUND */
                        }

                        /*
                         * Remove the bogus page (which does not exist at this
                         * object/offset); before doing so, we must get back
                         * our object lock to preserve our invariant.
                         *
                         * Also wake up any other process that may want to bring
                         * in this page.
                         *
                         * If this is the top-level object, we must leave the
                         * busy page to prevent another process from rushing
                         * past us, and inserting the page in that object at
                         * the same time that we are.
                         */
                        if (rv == VM_PAGER_ERROR) {
                                if (curproc)
                                        kprintf("vm_fault: pager read error, pid %d (%s)\n", curproc->p_pid, curproc->p_comm);
                                else
                                        kprintf("vm_fault: pager read error, thread %p (%s)\n", curthread, curthread->td_comm);
                        }
                        /*
                         * Data outside the range of the pager or an I/O error
                         *
                         * The page may have been wired during the pagein,
                         * e.g. by the buffer cache, and cannot simply be
                         * freed.  Call vnode_pager_freepage() to deal with it.
                         */
                        /*
                         * XXX - the check for kernel_map is a kludge to work
                         * around having the machine panic on a kernel space
                         * fault w/ I/O error.
                         */
                        if (((fs->map != &kernel_map) && (rv == VM_PAGER_ERROR)) ||
                                (rv == VM_PAGER_BAD)) {
                                vnode_pager_freepage(fs->m);
                                fs->m = NULL;
                                unlock_and_deallocate(fs);
                                if (rv == VM_PAGER_ERROR)
                                        return (KERN_FAILURE);
                                else
                                        return (KERN_PROTECTION_FAILURE);
                                /* NOT REACHED */
                        }
                        if (fs->object != fs->first_object) {
                                vnode_pager_freepage(fs->m);
                                fs->m = NULL;
                                /*
                                 * XXX - we cannot just fall out at this
                                 * point, m has been freed and is invalid!
                                 */
                        }
                }

                /*
                 * We get here if the object has a default pager (or unwiring)
                 * or the pager doesn't have the page.
                 */
                if (fs->object == fs->first_object)
                        fs->first_m = fs->m;

                /*
                 * Move on to the next object.  Lock the next object before
                 * unlocking the current one.
                 */
                pindex += OFF_TO_IDX(fs->object->backing_object_offset);
                next_object = fs->object->backing_object;
                if (next_object == NULL) {
                        /*
                         * If there's no object left, fill the page in the top
                         * object with zeros.
                         */
                        if (fs->object != fs->first_object) {
                                vm_object_pip_wakeup(fs->object);

                                fs->object = fs->first_object;
                                pindex = first_pindex;
                                fs->m = fs->first_m;
                        }
                        fs->first_m = NULL;

                        /*
                         * Zero the page if necessary and mark it valid.
                         */
                        if ((fs->m->flags & PG_ZERO) == 0) {
                                vm_page_zero_fill(fs->m);
                        } else {
                                mycpu->gd_cnt.v_ozfod++;
                        }
                        mycpu->gd_cnt.v_zfod++;
                        fs->m->valid = VM_PAGE_BITS_ALL;
                        break;  /* break to PAGE HAS BEEN FOUND */
                } else {
                        if (fs->object != fs->first_object) {
                                vm_object_pip_wakeup(fs->object);
                        }
                        KASSERT(fs->object != next_object,
                                ("object loop %p", next_object));
                        fs->object = next_object;
                        vm_object_pip_add(fs->object, 1);
                }
        }
1284
1285         KASSERT((fs->m->flags & PG_BUSY) != 0,
1286                 ("vm_fault: not busy after main loop"));
1287
1288         /*
1289          * PAGE HAS BEEN FOUND. [Loop invariant still holds -- the object lock
1290          * is held.]
1291          */
1292
1293         /*
1294          * If the page is being written, but isn't already owned by the
1295          * top-level object, we have to copy it into a new page owned by the
1296          * top-level object.
1297          */
1298         if (fs->object != fs->first_object) {
1299                 /*
1300                  * We only really need to copy if we want to write it.
1301                  */
1302                 if (fault_type & VM_PROT_WRITE) {
1303                         /*
1304                          * This allows pages to be virtually copied from a 
1305                          * backing_object into the first_object, where the 
1306                          * backing object has no other refs to it, and cannot
1307                          * gain any more refs.  Instead of a bcopy, we just 
1308                          * move the page from the backing object to the 
1309                          * first object.  Note that we must mark the page 
1310                          * dirty in the first object so that it will go out 
1311                          * to swap when needed.
1312                          */
1313                         if (
1314                                 /*
1315                                  * Map, if present, has not changed
1316                                  */
1317                                 (fs->map == NULL ||
1318                                 fs->map_generation == fs->map->timestamp) &&
1319                                 /*
1320                                  * Only one shadow object
1321                                  */
1322                                 (fs->object->shadow_count == 1) &&
1323                                 /*
1324                                  * No COW refs, except us
1325                                  */
1326                                 (fs->object->ref_count == 1) &&
1327                                 /*
1328                                  * No one else can look this object up
1329                                  */
1330                                 (fs->object->handle == NULL) &&
1331                                 /*
1332                                  * No other ways to look the object up
1333                                  */
1334                                 ((fs->object->type == OBJT_DEFAULT) ||
1335                                  (fs->object->type == OBJT_SWAP)) &&
1336                                 /*
1337                                  * We don't chase down the shadow chain
1338                                  */
1339                                 (fs->object == fs->first_object->backing_object) &&
1340
1341                                 /*
1342                                  * grab the lock if we need to
1343                                  */
1344                                 (fs->lookup_still_valid ||
1345                                  fs->map == NULL ||
1346                                  lockmgr(&fs->map->lock, LK_EXCLUSIVE|LK_NOWAIT) == 0)
1347                             ) {
1348                                 
1349                                 fs->lookup_still_valid = 1;
1350                                 /*
1351                                  * get rid of the unnecessary page
1352                                  */
1353                                 vm_page_protect(fs->first_m, VM_PROT_NONE);
1354                                 vm_page_free(fs->first_m);
1355                                 fs->first_m = NULL;
1356
1357                                 /*
1358                                  * grab the page and put it into the 
1359                                  * process's object.  The page is 
1360                                  * automatically made dirty.
1361                                  */
1362                                 vm_page_rename(fs->m, fs->first_object, first_pindex);
1363                                 fs->first_m = fs->m;
1364                                 vm_page_busy(fs->first_m);
1365                                 fs->m = NULL;
1366                                 mycpu->gd_cnt.v_cow_optim++;
1367                         } else {
1368                                 /*
1369                                  * Oh, well, let's copy it.
1370                                  */
1371                                 vm_page_copy(fs->m, fs->first_m);
1372                                 vm_page_event(fs->m, VMEVENT_COW);
1373                         }
1374
1375                         if (fs->m) {
1376                                 /*
1377                                  * We no longer need the old page or object.
1378                                  */
1379                                 release_page(fs);
1380                         }
1381
1382                         /*
1383                          * fs->object != fs->first_object due to above 
1384                          * conditional
1385                          */
1386                         vm_object_pip_wakeup(fs->object);
1387
1388                         /*
1389                          * Only use the new page below...
1390                          */
1391
1392                         mycpu->gd_cnt.v_cow_faults++;
1393                         fs->m = fs->first_m;
1394                         fs->object = fs->first_object;
1395                         pindex = first_pindex;
1396                 } else {
1397                         /*
1398                          * If it wasn't a write fault avoid having to copy
1399                          * the page by mapping it read-only.
1400                          */
1401                         fs->prot &= ~VM_PROT_WRITE;
1402                 }
1403         }
1404
1405         /*
1406          * We may have had to unlock a map to do I/O.  If we did then
1407          * lookup_still_valid will be FALSE.  If the map generation count
1408          * also changed then all sorts of things could have happened while
1409          * we were doing the I/O and we need to retry.
1410          */
1411
1412         if (!fs->lookup_still_valid &&
1413             fs->map != NULL &&
1414             (fs->map->timestamp != fs->map_generation)) {
1415                 release_page(fs);
1416                 unlock_and_deallocate(fs);
1417                 return (KERN_TRY_AGAIN);
1418         }
1419
1420         /*
1421          * If the fault is a write, we know that this page is being
1422          * written NOW so dirty it explicitly to save on pmap_is_modified()
1423          * calls later.
1424          *
1425          * If this is a NOSYNC mmap we do not want to set PG_NOSYNC
1426          * if the page is already dirty to prevent data written with
1427          * the expectation of being synced from not being synced.
1428          * Likewise if this entry does not request NOSYNC then make
1429          * sure the page isn't marked NOSYNC.  Applications sharing
1430          * data should use the same flags to avoid ping ponging.
1431          *
1432          * Also tell the backing pager, if any, that it should remove
1433          * any swap backing since the page is now dirty.
1434          */
1435         if (fs->prot & VM_PROT_WRITE) {
1436                 vm_object_set_writeable_dirty(fs->m->object);
1437                 if (fs->entry->eflags & MAP_ENTRY_NOSYNC) {
1438                         if (fs->m->dirty == 0)
1439                                 vm_page_flag_set(fs->m, PG_NOSYNC);
1440                 } else {
1441                         vm_page_flag_clear(fs->m, PG_NOSYNC);
1442                 }
1443                 if (fs->fault_flags & VM_FAULT_DIRTY) {
1444                         crit_enter();
1445                         vm_page_dirty(fs->m);
1446                         vm_pager_page_unswapped(fs->m);
1447                         crit_exit();
1448                 }
1449         }
1450
1451         /*
1452          * Page had better still be busy.  We are still locked up and 
1453          * fs->object will have another PIP reference if it is not equal
1454          * to fs->first_object.
1455          */
1456         KASSERT(fs->m->flags & PG_BUSY,
1457                 ("vm_fault: page %p not busy!", fs->m));
1458
1459         /*
1460          * Sanity check: page must be completely valid or it is not fit to
1461          * map into user space.  vm_pager_get_pages() ensures this.
1462          */
1463         if (fs->m->valid != VM_PAGE_BITS_ALL) {
1464                 vm_page_zero_invalid(fs->m, TRUE);
1465                 kprintf("Warning: page %p partially invalid on fault\n", fs->m);
1466         }
1467
1468         return (KERN_SUCCESS);
1469 }
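
/*
 * Illustrative sketch (an addition, not in the original source): the
 * KERN_TRY_AGAIN return above signals that the map generation changed
 * while the map was unlocked for I/O, and the caller is expected to
 * redo the map lookup and re-run this routine.  Schematically, with a
 * hypothetical name standing in for the routine above:
 */
#if 0
	do {
		/* redo vm_map_lookup(), refresh fs->map_generation */
		rv = fault_object_pass(fs, fault_type);
	} while (rv == KERN_TRY_AGAIN);
#endif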
1470
1471 /*
1472  * Wire down a range of virtual addresses in a map.  The entry in question
1473  * should be marked in-transition and the map must be locked.  We must
1474  * release the map temporarily while faulting-in the page to avoid a
1475  * deadlock.  Note that the entry may be clipped while we are blocked but
1476  * will never be freed.
1477  */
1478 int
1479 vm_fault_wire(vm_map_t map, vm_map_entry_t entry, boolean_t user_wire)
1480 {
1481         boolean_t fictitious;
1482         vm_offset_t start;
1483         vm_offset_t end;
1484         vm_offset_t va;
1485         vm_paddr_t pa;
1486         pmap_t pmap;
1487         int rv;
1488
1489         pmap = vm_map_pmap(map);
1490         start = entry->start;
1491         end = entry->end;
1492         fictitious = entry->object.vm_object &&
1493                         (entry->object.vm_object->type == OBJT_DEVICE);
1494
1495         vm_map_unlock(map);
1496         map->timestamp++;
1497
1498         /*
1499          * We simulate a fault to get the page and enter it in the physical
1500          * map.
1501          */
1502         for (va = start; va < end; va += PAGE_SIZE) {
1503                 if (user_wire) {
1504                         rv = vm_fault(map, va, VM_PROT_READ, 
1505                                         VM_FAULT_USER_WIRE);
1506                 } else {
1507                         rv = vm_fault(map, va, VM_PROT_READ|VM_PROT_WRITE,
1508                                         VM_FAULT_CHANGE_WIRING);
1509                 }
1510                 if (rv) {
1511                         while (va > start) {
1512                                 va -= PAGE_SIZE;
1513                                 if ((pa = pmap_extract(pmap, va)) == 0)
1514                                         continue;
1515                                 pmap_change_wiring(pmap, va, FALSE);
1516                                 if (!fictitious)
1517                                         vm_page_unwire(PHYS_TO_VM_PAGE(pa), 1);
1518                         }
1519                         vm_map_lock(map);
1520                         return (rv);
1521                 }
1522         }
1523         vm_map_lock(map);
1524         return (KERN_SUCCESS);
1525 }
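
/*
 * Illustrative sketch (an addition, not in the original source): on
 * failure vm_fault_wire() has already unwound the pages it managed to
 * wire, so a caller only needs to propagate the error:
 */
#if 0
	rv = vm_fault_wire(map, entry, user_wire);
	if (rv != KERN_SUCCESS)
		return (rv);	/* partially wired pages already unwired */
#endif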
1526
1527 /*
1528  * Unwire a range of virtual addresses in a map.  The map should be
1529  * locked.
1530  */
1531 void
1532 vm_fault_unwire(vm_map_t map, vm_map_entry_t entry)
1533 {
1534         boolean_t fictitious;
1535         vm_offset_t start;
1536         vm_offset_t end;
1537         vm_offset_t va;
1538         vm_paddr_t pa;
1539         pmap_t pmap;
1540
1541         pmap = vm_map_pmap(map);
1542         start = entry->start;
1543         end = entry->end;
1544         fictitious = entry->object.vm_object &&
1545                         (entry->object.vm_object->type == OBJT_DEVICE);
1546
1547         /*
1548          * Since the pages are wired down, we must be able to get their
1549          * mappings from the physical map system.
1550          */
1551         for (va = start; va < end; va += PAGE_SIZE) {
1552                 pa = pmap_extract(pmap, va);
1553                 if (pa != 0) {
1554                         pmap_change_wiring(pmap, va, FALSE);
1555                         if (!fictitious)
1556                                 vm_page_unwire(PHYS_TO_VM_PAGE(pa), 1);
1557                 }
1558         }
1559 }
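
/*
 * Illustrative sketch (an addition, not in the original source): the
 * expected pairing of vm_fault_wire() and vm_fault_unwire().  The
 * caller holds the map lock and has marked the entry in-transition;
 * the helper name is hypothetical.
 */
#if 0
static int
example_wire_then_unwire(vm_map_t map, vm_map_entry_t entry)
{
	int rv;

	rv = vm_fault_wire(map, entry, FALSE);	/* kernel-style wiring */
	if (rv != KERN_SUCCESS)
		return (rv);
	/* ... operate on the pinned range ... */
	vm_fault_unwire(map, entry);
	return (KERN_SUCCESS);
}
#endif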
1560
1561 /*
1562  * Reduce the rate at which memory is allocated to a process based
1563  * on the perceived load on the VM system. As the load increases
1564  * the allocation burst rate goes down and the delay increases. 
1565  *
1566  * Rate limiting does not apply when faulting active or inactive
1567  * pages.  When faulting 'cache' pages, rate limiting only applies
1568  * if the system currently has a severe page deficit.
1569  *
1570  * XXX vm_pagesupply should be increased when a page is freed.
1571  *
1572  * We sleep up to 1/10 of a second.
1573  */
1574 static int
1575 vm_fault_ratelimit(struct vmspace *vmspace)
1576 {
1577         if (vm_load_enable == 0)
1578                 return(0);
1579         if (vmspace->vm_pagesupply > 0) {
1580                 --vmspace->vm_pagesupply;
1581                 return(0);
1582         }
1583 #ifdef INVARIANTS
1584         if (vm_load_debug) {
1585                 kprintf("load %-4d give %d pgs, wait %d, pid %-5d (%s)\n",
1586                         vm_load, 
1587                         (1000 - vm_load) / 10, vm_load * hz / 10000,
1588                         curproc->p_pid, curproc->p_comm);
1589         }
1590 #endif
1591         vmspace->vm_pagesupply = (1000 - vm_load) / 10;
1592         return(vm_load * hz / 10000);
1593 }
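
/*
 * Worked example (an addition, not in the original source), assuming
 * hz == 100: with vm_load == 500 a process gets a burst of
 * (1000 - 500) / 10 == 50 unpenalized page allocations, after which
 * each fault sleeps 500 * 100 / 10000 == 5 ticks (50ms).  At the
 * maximum load of 1000 the burst drops to 0 and the sleep becomes
 * hz / 10, the 1/10 second cap noted above.
 */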
1594
1595 /*
1596  *      Routine:
1597  *              vm_fault_copy_entry
1598  *      Function:
1599  *              Copy all of the pages from a wired-down map entry to another.
1600  *
1601  *      In/out conditions:
1602  *              The source and destination maps must be locked for write.
1603  *              The source map entry must be wired down (or be a sharing map
1604  *              entry corresponding to a main map entry that is wired down).
1605  */
1606
1607 void
1608 vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
1609     vm_map_entry_t dst_entry, vm_map_entry_t src_entry)
1610 {
1611         vm_object_t dst_object;
1612         vm_object_t src_object;
1613         vm_ooffset_t dst_offset;
1614         vm_ooffset_t src_offset;
1615         vm_prot_t prot;
1616         vm_offset_t vaddr;
1617         vm_page_t dst_m;
1618         vm_page_t src_m;
1619
1620 #ifdef  lint
1621         src_map++;
1622 #endif  /* lint */
1623
1624         src_object = src_entry->object.vm_object;
1625         src_offset = src_entry->offset;
1626
1627         /*
1628          * Create the top-level object for the destination entry. (Doesn't
1629          * actually shadow anything - we copy the pages directly.)
1630          */
1631         vm_map_entry_allocate_object(dst_entry);
1632         dst_object = dst_entry->object.vm_object;
1633
1634         prot = dst_entry->max_protection;
1635
1636         /*
1637          * Loop through all of the pages in the entry's range, copying each
1638          * one from the source object (it should be there) to the destination
1639          * object.
1640          */
1641         for (vaddr = dst_entry->start, dst_offset = 0;
1642             vaddr < dst_entry->end;
1643             vaddr += PAGE_SIZE, dst_offset += PAGE_SIZE) {
1644
1645                 /*
1646                  * Allocate a page in the destination object
1647                  */
1648                 do {
1649                         dst_m = vm_page_alloc(dst_object,
1650                                 OFF_TO_IDX(dst_offset), VM_ALLOC_NORMAL);
1651                         if (dst_m == NULL) {
1652                                 vm_wait();
1653                         }
1654                 } while (dst_m == NULL);
1655
1656                 /*
1657                  * Find the page in the source object, and copy it in.
1658                  * (Because the source is wired down, the page will be in
1659                  * memory.)
1660                  */
1661                 src_m = vm_page_lookup(src_object,
1662                         OFF_TO_IDX(dst_offset + src_offset));
1663                 if (src_m == NULL)
1664                         panic("vm_fault_copy_entry: page missing");
1665
1666                 vm_page_copy(src_m, dst_m);
1667                 vm_page_event(src_m, VMEVENT_COW);
1668
1669                 /*
1670                  * Enter it in the pmap...
1671                  */
1672
1673                 vm_page_flag_clear(dst_m, PG_ZERO);
1674                 pmap_enter(dst_map->pmap, vaddr, dst_m, prot, FALSE);
1675
1676                 /*
1677                  * Mark it no longer busy, and put it on the active list.
1678                  */
1679                 vm_page_activate(dst_m);
1680                 vm_page_wakeup(dst_m);
1681         }
1682 }
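
/*
 * Worked example (an addition, not in the original source): the loop
 * above iterates by page.  With PAGE_SIZE == 4096 and
 * src_entry->offset == 8192, the third iteration has
 * dst_offset == 8192, so the page is allocated at destination index
 * OFF_TO_IDX(8192) == 2 and looked up at source index
 * OFF_TO_IDX(8192 + 8192) == 4.
 */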
1683
1684
1685 /*
1686  * This routine checks around the requested page for other pages that
1687  * might be able to be faulted in.  This routine brackets the viable
1688  * pages for the pages to be paged in.
1689  *
1690  * Inputs:
1691  *      m, rbehind, rahead
1692  *
1693  * Outputs:
1694  *  marray (array of vm_page_t), reqpage (index of requested page)
1695  *
1696  * Return value:
1697  *  number of pages in marray
1698  */
1699 static int
1700 vm_fault_additional_pages(vm_page_t m, int rbehind, int rahead,
1701     vm_page_t *marray, int *reqpage)
1702 {
1703         int i, j;
1704         vm_object_t object;
1705         vm_pindex_t pindex, startpindex, endpindex, tpindex;
1706         vm_page_t rtm;
1707         int cbehind, cahead;
1708
1709         object = m->object;
1710         pindex = m->pindex;
1711
1712         /*
1713          * we don't fault-ahead for device pager
1714          */
1715         if (object->type == OBJT_DEVICE) {
1716                 *reqpage = 0;
1717                 marray[0] = m;
1718                 return 1;
1719         }
1720
1721         /*
1722          * if the requested page is not available, then give up now
1723          */
1724         if (!vm_pager_has_page(object, pindex, &cbehind, &cahead)) {
1725                 *reqpage = 0;   /* not used by caller, fix compiler warn */
1726                 return 0;
1727         }
1728
1729         if ((cbehind == 0) && (cahead == 0)) {
1730                 *reqpage = 0;
1731                 marray[0] = m;
1732                 return 1;
1733         }
1734
1735         if (rahead > cahead) {
1736                 rahead = cahead;
1737         }
1738
1739         if (rbehind > cbehind) {
1740                 rbehind = cbehind;
1741         }
1742
1743         /*
1744          * try to do any readahead that we might have free pages for.
1745          */
1746         if ((rahead + rbehind) >
1747                 ((vmstats.v_free_count + vmstats.v_cache_count) - vmstats.v_free_reserved)) {
1748                 pagedaemon_wakeup();
1749                 marray[0] = m;
1750                 *reqpage = 0;
1751                 return 1;
1752         }
1753
1754         /*
1755          * scan backward for the read behind pages -- in memory 
1756          *
1757          * Assume that if the page is not found an interrupt will not
1758          * create it.  Theoretically interrupts can only remove (busy)
1759          * pages, not create new associations.
1760          */
1761         if (pindex > 0) {
1762                 if (rbehind > pindex) {
1763                         rbehind = pindex;
1764                         startpindex = 0;
1765                 } else {
1766                         startpindex = pindex - rbehind;
1767                 }
1768
1769                 crit_enter();
1770                 for (tpindex = pindex - 1; tpindex >= startpindex; tpindex -= 1) {
1771                         if (vm_page_lookup(object, tpindex)) {
1772                                 startpindex = tpindex + 1;
1773                                 break;
1774                         }
1775                         if (tpindex == 0)
1776                                 break;
1777                 }
1778
1779                 for (i = 0, tpindex = startpindex; tpindex < pindex; i++, tpindex++) {
1780
1781                         rtm = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL);
1782                         if (rtm == NULL) {
1783                                 crit_exit();
1784                                 for (j = 0; j < i; j++) {
1785                                         vm_page_free(marray[j]);
1786                                 }
1787                                 marray[0] = m;
1788                                 *reqpage = 0;
1789                                 return 1;
1790                         }
1791
1792                         marray[i] = rtm;
1793                 }
1794                 crit_exit();
1795         } else {
1796                 startpindex = 0;
1797                 i = 0;
1798         }
1799
1800         marray[i] = m;
1801         /* index of the requested page in marray */
1802         *reqpage = i;
1803
1804         tpindex = pindex + 1;
1805         i++;
1806
1807         /*
1808          * scan forward for the read ahead pages
1809          */
1810         endpindex = tpindex + rahead;
1811         if (endpindex > object->size)
1812                 endpindex = object->size;
1813
1814         crit_enter();
1815         for (; tpindex < endpindex; i++, tpindex++) {
1816
1817                 if (vm_page_lookup(object, tpindex)) {
1818                         break;
1819                 }
1820
1821                 rtm = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL);
1822                 if (rtm == NULL) {
1823                         break;
1824                 }
1825
1826                 marray[i] = rtm;
1827         }
1828         crit_exit();
1829
1830         /* return number of pages in marray */
1831         return i;
1832 }
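
/*
 * Worked example (an addition, not in the original source): say the
 * faulting page has pindex 10, the caller asks for rbehind == 8 and
 * rahead == 16, and the pager reports cbehind == 4 and cahead == 6.
 * The window is clamped to rbehind = 4 and rahead = 6, the backward
 * scan starts at startpindex == 6, the requested page lands at
 * marray[4] (*reqpage == 4), and the forward scan runs from pindex 11
 * up to endpindex == 17 (clamped to object->size), so at most 11
 * pages are returned.
 */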