kernel - Improve VM fault performance for sequential access
[dragonfly.git] / sys / vm / vm_fault.c
1 /*
2  * Copyright (c) 1991, 1993
3  *      The Regents of the University of California.  All rights reserved.
4  * Copyright (c) 1994 John S. Dyson
5  * All rights reserved.
6  * Copyright (c) 1994 David Greenman
7  * All rights reserved.
8  *
9  *
10  * This code is derived from software contributed to Berkeley by
11  * The Mach Operating System project at Carnegie-Mellon University.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  * 3. All advertising materials mentioning features or use of this software
22  *    must display the following acknowledgement:
23  *      This product includes software developed by the University of
24  *      California, Berkeley and its contributors.
25  * 4. Neither the name of the University nor the names of its contributors
26  *    may be used to endorse or promote products derived from this software
27  *    without specific prior written permission.
28  *
29  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
30  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
33  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
35  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
36  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
38  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39  * SUCH DAMAGE.
40  *
41  *      from: @(#)vm_fault.c    8.4 (Berkeley) 1/12/94
42  *
43  *
44  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
45  * All rights reserved.
46  *
47  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
48  *
49  * Permission to use, copy, modify and distribute this software and
50  * its documentation is hereby granted, provided that both the copyright
51  * notice and this permission notice appear in all copies of the
52  * software, derivative works or modified versions, and any portions
53  * thereof, and that both notices appear in supporting documentation.
54  *
55  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
56  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
57  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
58  *
59  * Carnegie Mellon requests users of this software to return to
60  *
61  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
62  *  School of Computer Science
63  *  Carnegie Mellon University
64  *  Pittsburgh PA 15213-3890
65  *
66  * any improvements or extensions that they make and grant Carnegie the
67  * rights to redistribute these changes.
68  *
69  * $FreeBSD: src/sys/vm/vm_fault.c,v 1.108.2.8 2002/02/26 05:49:27 silby Exp $
70  * $DragonFly: src/sys/vm/vm_fault.c,v 1.47 2008/07/01 02:02:56 dillon Exp $
71  */
72
73 /*
74  *      Page fault handling module.
75  */
76
77 #include <sys/param.h>
78 #include <sys/systm.h>
79 #include <sys/kernel.h>
80 #include <sys/proc.h>
81 #include <sys/vnode.h>
82 #include <sys/resourcevar.h>
83 #include <sys/vmmeter.h>
84 #include <sys/vkernel.h>
85 #include <sys/sfbuf.h>
86 #include <sys/lock.h>
87 #include <sys/sysctl.h>
88
89 #include <vm/vm.h>
90 #include <vm/vm_param.h>
91 #include <vm/pmap.h>
92 #include <vm/vm_map.h>
93 #include <vm/vm_object.h>
94 #include <vm/vm_page.h>
95 #include <vm/vm_pageout.h>
96 #include <vm/vm_kern.h>
97 #include <vm/vm_pager.h>
98 #include <vm/vnode_pager.h>
99 #include <vm/vm_extern.h>
100
101 #include <sys/thread2.h>
102 #include <vm/vm_page2.h>
103
104 #define VM_FAULT_READ_AHEAD 8
105 #define VM_FAULT_READ_BEHIND 7
106 #define VM_FAULT_READ (VM_FAULT_READ_AHEAD+VM_FAULT_READ_BEHIND+1)
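/*
 * Illustrative note (not part of the original source): with the defaults
 * above the fault code considers a window of
 *
 *	VM_FAULT_READ = 8 (ahead) + 7 (behind) + 1 (faulting page) = 16 pages
 *
 * around the faulting pindex, i.e. 64KB with 4KB pages.  The number of
 * pages actually read is further limited by vm_fault_additional_pages()
 * and by the pager itself.
 */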
107
108 struct faultstate {
109         vm_page_t m;
110         vm_object_t object;
111         vm_pindex_t pindex;
112         vm_prot_t prot;
113         vm_page_t first_m;
114         vm_object_t first_object;
115         vm_prot_t first_prot;
116         vm_map_t map;
117         vm_map_entry_t entry;
118         int lookup_still_valid;
119         int didlimit;
120         int hardfault;
121         int fault_flags;
122         int map_generation;
123         boolean_t wired;
124         struct vnode *vp;
125 };
126
127 static int burst_fault = 1;
128 SYSCTL_INT(_vm, OID_AUTO, burst_fault, CTLFLAG_RW, &burst_fault, 0, "");
129 static int debug_cluster = 0;
130 SYSCTL_INT(_vm, OID_AUTO, debug_cluster, CTLFLAG_RW, &debug_cluster, 0, "");
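/*
 * Usage sketch (illustrative): both knobs are exported as read/write
 * sysctls and can be inspected or changed at run time, e.g.
 *
 *	sysctl vm.burst_fault
 *	sysctl vm.debug_cluster=1	(prints "R" when a read-ahead mark hits)
 */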
131
132 static int vm_fault_object(struct faultstate *, vm_pindex_t, vm_prot_t);
133 static int vm_fault_vpagetable(struct faultstate *, vm_pindex_t *, vpte_t, int);
134 static int vm_fault_additional_pages (vm_page_t, int, int, vm_page_t *, int *);
135 static int vm_fault_ratelimit(struct vmspace *);
136
137 static __inline void
138 release_page(struct faultstate *fs)
139 {
140         vm_page_deactivate(fs->m);
141         vm_page_wakeup(fs->m);
142         fs->m = NULL;
143 }
144
145 static __inline void
146 unlock_map(struct faultstate *fs)
147 {
148         if (fs->lookup_still_valid && fs->map) {
149                 vm_map_lookup_done(fs->map, fs->entry, 0);
150                 fs->lookup_still_valid = FALSE;
151         }
152 }
153
154 /*
155  * Clean up after a successful call to vm_fault_object() so another call
156  * to vm_fault_object() can be made.
157  */
158 static void
159 _cleanup_successful_fault(struct faultstate *fs, int relock)
160 {
161         if (fs->object != fs->first_object) {
162                 vm_page_free(fs->first_m);
163                 vm_object_pip_wakeup(fs->object);
164                 fs->first_m = NULL;
165         }
166         fs->object = fs->first_object;
167         if (relock && fs->lookup_still_valid == FALSE) {
168                 if (fs->map)
169                         vm_map_lock_read(fs->map);
170                 fs->lookup_still_valid = TRUE;
171         }
172 }
173
174 static void
175 _unlock_things(struct faultstate *fs, int dealloc)
176 {
177         vm_object_pip_wakeup(fs->first_object);
178         _cleanup_successful_fault(fs, 0);
179         if (dealloc) {
180                 vm_object_deallocate(fs->first_object);
181                 fs->first_object = NULL;
182         }
183         unlock_map(fs); 
184         if (fs->vp != NULL) { 
185                 vput(fs->vp);
186                 fs->vp = NULL;
187         }
188 }
189
190 #define unlock_things(fs) _unlock_things(fs, 0)
191 #define unlock_and_deallocate(fs) _unlock_things(fs, 1)
192 #define cleanup_successful_fault(fs) _cleanup_successful_fault(fs, 1)
193
194 /*
195  * TRYPAGER 
196  *
197  * Determine if the pager for the current object *might* contain the page.
198  *
199  * We only need to try the pager if this is not a default object (default
200  * objects are zero-fill and have no real pager), and if we are not taking
201  * a wiring fault or if the FS entry is wired.
202  */
203 #define TRYPAGER(fs)    \
204                 (fs->object->type != OBJT_DEFAULT && \
205                 (((fs->fault_flags & VM_FAULT_WIRE_MASK) == 0) || fs->wired))
206
207 /*
208  * vm_fault:
209  *
210  * Handle a page fault occurring at the given address, requiring the given
211  * permissions, in the map specified.  If successful, the page is inserted
212  * into the associated physical map.
213  *
214  * NOTE: The given address should be truncated to the proper page address.
215  *
216  * KERN_SUCCESS is returned if the page fault is handled; otherwise,
217  * a standard error specifying why the fault is fatal is returned.
218  *
219  * The map in question must be referenced, and remains so.
220  * The caller may hold no locks.
221  */
222 int
223 vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type, int fault_flags)
224 {
225         int result;
226         vm_pindex_t first_pindex;
227         struct faultstate fs;
228
229         mycpu->gd_cnt.v_vm_faults++;
230
231         fs.didlimit = 0;
232         fs.hardfault = 0;
233         fs.fault_flags = fault_flags;
234
235 RetryFault:
236         /*
237          * Find the vm_map_entry representing the backing store and resolve
238          * the top level object and page index.  This may have the side
239          * effect of executing a copy-on-write on the map entry and/or
240          * creating a shadow object, but will not COW any actual VM pages.
241          *
242          * On success fs.map is left read-locked and various other fields 
243          * are initialized but not otherwise referenced or locked.
244          *
245          * NOTE!  vm_map_lookup will try to upgrade the fault_type to
246          * VM_FAULT_WRITE if the map entry is a virtual page table and also
247  * writable, so we can set the 'A'ccessed bit in the virtual page
248          * table entry.
249          */
250         fs.map = map;
251         result = vm_map_lookup(&fs.map, vaddr, fault_type,
252                                &fs.entry, &fs.first_object,
253                                &first_pindex, &fs.first_prot, &fs.wired);
254
255         /*
256          * If the lookup failed or the map protections are incompatible,
257          * the fault generally fails.  However, if the caller is trying
258          * to do a user wiring we have more work to do.
259          */
260         if (result != KERN_SUCCESS) {
261                 if (result != KERN_PROTECTION_FAILURE)
262                         return result;
263                 if ((fs.fault_flags & VM_FAULT_WIRE_MASK) != VM_FAULT_USER_WIRE)
264                         return result;
265
266                 /*
267                  * If we are user-wiring a r/w segment, and it is COW, then
268                  * we need to do the COW operation.  Note that we don't
269                  * currently COW RO sections now, because it is NOT desirable
270                  * to COW .text.  We simply keep .text from ever being COW'ed
271                  * and take the heat that one cannot debug wired .text sections.
272                  */
273                 result = vm_map_lookup(&fs.map, vaddr,
274                                        VM_PROT_READ|VM_PROT_WRITE|
275                                         VM_PROT_OVERRIDE_WRITE,
276                                        &fs.entry, &fs.first_object,
277                                        &first_pindex, &fs.first_prot,
278                                        &fs.wired);
279                 if (result != KERN_SUCCESS)
280                         return result;
281
282                 /*
283                  * If we don't COW now, on a user wire, the user will never
284                  * be able to write to the mapping.  If we don't make this
285                  * restriction, the bookkeeping would be nearly impossible.
286                  */
287                 if ((fs.entry->protection & VM_PROT_WRITE) == 0)
288                         fs.entry->max_protection &= ~VM_PROT_WRITE;
289         }
290
291         /*
292          * fs.map is read-locked
293          *
294          * Misc checks.  Save the map generation number to detect races.
295          */
296         fs.map_generation = fs.map->timestamp;
297
298         if (fs.entry->eflags & MAP_ENTRY_NOFAULT) {
299                 panic("vm_fault: fault on nofault entry, addr: %lx",
300                     (u_long)vaddr);
301         }
302
303         /*
304          * A system map entry may return a NULL object.  No object means
305          * no pager means an unrecoverable kernel fault.
306          */
307         if (fs.first_object == NULL) {
308                 panic("vm_fault: unrecoverable fault at %p in entry %p",
309                         (void *)vaddr, fs.entry);
310         }
311
312         /*
313          * Make a reference to this object to prevent its disposal while we
314          * are messing with it.  Once we have the reference, the map is free
315          * to be diddled.  Since objects reference their shadows (and copies),
316          * they will stay around as well.
317          *
318          * Bump the paging-in-progress count to prevent size changes (e.g.
319          * truncation operations) during I/O.  This must be done after
320          * obtaining the vnode lock in order to avoid possible deadlocks.
321          */
322         vm_object_reference(fs.first_object);
323         fs.vp = vnode_pager_lock(fs.first_object);
324         vm_object_pip_add(fs.first_object, 1);
325
326         fs.lookup_still_valid = TRUE;
327         fs.first_m = NULL;
328         fs.object = fs.first_object;    /* so unlock_and_deallocate works */
329
330         /*
331          * If the entry is wired we cannot change the page protection.
332          */
333         if (fs.wired)
334                 fault_type = fs.first_prot;
335
336         /*
337          * The page we want is at (first_object, first_pindex), but if the
338          * vm_map_entry is VM_MAPTYPE_VPAGETABLE we have to traverse the
339          * page table to figure out the actual pindex.
340          *
341          * NOTE!  DEVELOPMENT IN PROGRESS, THIS IS AN INITIAL IMPLEMENTATION
342          * ONLY
343          */
344         if (fs.entry->maptype == VM_MAPTYPE_VPAGETABLE) {
345                 result = vm_fault_vpagetable(&fs, &first_pindex,
346                                              fs.entry->aux.master_pde,
347                                              fault_type);
348                 if (result == KERN_TRY_AGAIN)
349                         goto RetryFault;
350                 if (result != KERN_SUCCESS)
351                         return (result);
352         }
353
354         /*
355          * Now we have the actual (object, pindex), fault in the page.  If
356          * vm_fault_object() fails it will unlock and deallocate the FS
357          * data.   If it succeeds everything remains locked and fs->object
358  * will have an additional PIP count if it is not equal to
359  * fs->first_object.
360          *
361          * vm_fault_object will set fs->prot for the pmap operation.  It is
362          * allowed to set VM_PROT_WRITE if fault_type == VM_PROT_READ if the
363          * page can be safely written.  However, it will force a read-only
364          * mapping for a read fault if the memory is managed by a virtual
365          * page table.
366          */
367         result = vm_fault_object(&fs, first_pindex, fault_type);
368
369         if (result == KERN_TRY_AGAIN)
370                 goto RetryFault;
371         if (result != KERN_SUCCESS)
372                 return (result);
373
374         /*
375          * On success vm_fault_object() does not unlock or deallocate, and fs.m
376          * will contain a busied page.
377          *
378          * Enter the page into the pmap and do pmap-related adjustments.
379          */
380         unlock_things(&fs);
381         pmap_enter(fs.map->pmap, vaddr, fs.m, fs.prot, fs.wired);
382
383         if (((fs.fault_flags & VM_FAULT_WIRE_MASK) == 0) && (fs.wired == 0)) {
384                 pmap_prefault(fs.map->pmap, vaddr, fs.entry);
385         }
386
387         vm_page_flag_clear(fs.m, PG_ZERO);
388         vm_page_flag_set(fs.m, PG_REFERENCED);
389
390         /*
391          * If the page is not wired down, then put it where the pageout daemon
392          * can find it.
393          */
394         if (fs.fault_flags & VM_FAULT_WIRE_MASK) {
395                 if (fs.wired)
396                         vm_page_wire(fs.m);
397                 else
398                         vm_page_unwire(fs.m, 1);
399         } else {
400                 vm_page_activate(fs.m);
401         }
402
403         if (curthread->td_lwp) {
404                 if (fs.hardfault) {
405                         curthread->td_lwp->lwp_ru.ru_majflt++;
406                 } else {
407                         curthread->td_lwp->lwp_ru.ru_minflt++;
408                 }
409         }
410
411         /*
412          * Unlock everything, and return
413          */
414         vm_page_wakeup(fs.m);
415         vm_object_deallocate(fs.first_object);
416
417         return (KERN_SUCCESS);
418 }
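/*
 * Usage sketch (illustrative only; 'fault_addr', 'is_write' and the error
 * path are hypothetical, 'lp' is assumed to be the faulting lwp): a
 * machine-dependent trap handler typically resolves a user fault along
 * these lines, with the address truncated to a page boundary and the map
 * already referenced:
 *
 *	vm_offset_t va = trunc_page(fault_addr);
 *	vm_prot_t ftype = is_write ? VM_PROT_WRITE : VM_PROT_READ;
 *	int rv = vm_fault(&lp->lwp_vmspace->vm_map, va, ftype,
 *			  VM_FAULT_NORMAL);
 *	if (rv != KERN_SUCCESS)
 *		(deliver SIGSEGV or SIGBUS to the faulting process)
 */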
419
420 /*
421  * Fault in the specified virtual address in the current process map, 
422  * returning a held VM page or NULL.  See vm_fault_page() for more 
423  * information.
424  */
425 vm_page_t
426 vm_fault_page_quick(vm_offset_t va, vm_prot_t fault_type, int *errorp)
427 {
428         struct lwp *lp = curthread->td_lwp;
429         vm_page_t m;
430
431         m = vm_fault_page(&lp->lwp_vmspace->vm_map, va, 
432                           fault_type, VM_FAULT_NORMAL, errorp);
433         return(m);
434 }
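/*
 * Usage sketch (illustrative; 'uva' is a hypothetical user address): the
 * returned page is held but not busied, so a caller typically maps it
 * temporarily and drops the hold when done:
 *
 *	int error;
 *	vm_page_t m = vm_fault_page_quick(uva, VM_PROT_READ, &error);
 *	if (m) {
 *		struct sf_buf *sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
 *		(copy data out via sf_buf_kva(sf))
 *		sf_buf_free(sf);
 *		vm_page_unhold(m);
 *	}
 */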
435
436 /*
437  * Fault in the specified virtual address in the specified map, doing all
438  * necessary manipulation of the object store and all necessary I/O.  Return
439  * a held VM page or NULL, and set *errorp.  The related pmap is not
440  * updated.
441  *
442  * The returned page will be properly dirtied if VM_PROT_WRITE was specified,
443  * and marked PG_REFERENCED as well.
444  *
445  * If the page cannot be faulted writable and VM_PROT_WRITE was specified, an
446  * error will be returned.
447  */
448 vm_page_t
449 vm_fault_page(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
450               int fault_flags, int *errorp)
451 {
452         vm_pindex_t first_pindex;
453         struct faultstate fs;
454         int result;
455         vm_prot_t orig_fault_type = fault_type;
456
457         mycpu->gd_cnt.v_vm_faults++;
458
459         fs.didlimit = 0;
460         fs.hardfault = 0;
461         fs.fault_flags = fault_flags;
462         KKASSERT((fault_flags & VM_FAULT_WIRE_MASK) == 0);
463
464 RetryFault:
465         /*
466          * Find the vm_map_entry representing the backing store and resolve
467          * the top level object and page index.  This may have the side
468          * effect of executing a copy-on-write on the map entry and/or
469          * creating a shadow object, but will not COW any actual VM pages.
470          *
471          * On success fs.map is left read-locked and various other fields 
472          * are initialized but not otherwise referenced or locked.
473          *
474          * NOTE!  vm_map_lookup will upgrade the fault_type to VM_FAULT_WRITE
475          * if the map entry is a virtual page table and also writable,
476  * so we can set the 'A'ccessed bit in the virtual page table entry.
477          */
478         fs.map = map;
479         result = vm_map_lookup(&fs.map, vaddr, fault_type,
480                                &fs.entry, &fs.first_object,
481                                &first_pindex, &fs.first_prot, &fs.wired);
482
483         if (result != KERN_SUCCESS) {
484                 *errorp = result;
485                 return (NULL);
486         }
487
488         /*
489          * fs.map is read-locked
490          *
491          * Misc checks.  Save the map generation number to detect races.
492          */
493         fs.map_generation = fs.map->timestamp;
494
495         if (fs.entry->eflags & MAP_ENTRY_NOFAULT) {
496                 panic("vm_fault: fault on nofault entry, addr: %lx",
497                     (u_long)vaddr);
498         }
499
500         /*
501          * A system map entry may return a NULL object.  No object means
502          * no pager means an unrecoverable kernel fault.
503          */
504         if (fs.first_object == NULL) {
505                 panic("vm_fault: unrecoverable fault at %p in entry %p",
506                         (void *)vaddr, fs.entry);
507         }
508
509         /*
510          * Make a reference to this object to prevent its disposal while we
511          * are messing with it.  Once we have the reference, the map is free
512          * to be diddled.  Since objects reference their shadows (and copies),
513          * they will stay around as well.
514          *
515          * Bump the paging-in-progress count to prevent size changes (e.g.
516          * truncation operations) during I/O.  This must be done after
517          * obtaining the vnode lock in order to avoid possible deadlocks.
518          */
519         vm_object_reference(fs.first_object);
520         fs.vp = vnode_pager_lock(fs.first_object);
521         vm_object_pip_add(fs.first_object, 1);
522
523         fs.lookup_still_valid = TRUE;
524         fs.first_m = NULL;
525         fs.object = fs.first_object;    /* so unlock_and_deallocate works */
526
527         /*
528          * If the entry is wired we cannot change the page protection.
529          */
530         if (fs.wired)
531                 fault_type = fs.first_prot;
532
533         /*
534          * The page we want is at (first_object, first_pindex), but if the
535          * vm_map_entry is VM_MAPTYPE_VPAGETABLE we have to traverse the
536          * page table to figure out the actual pindex.
537          *
538          * NOTE!  DEVELOPMENT IN PROGRESS, THIS IS AN INITIAL IMPLEMENTATION
539          * ONLY
540          */
541         if (fs.entry->maptype == VM_MAPTYPE_VPAGETABLE) {
542                 result = vm_fault_vpagetable(&fs, &first_pindex,
543                                              fs.entry->aux.master_pde,
544                                              fault_type);
545                 if (result == KERN_TRY_AGAIN)
546                         goto RetryFault;
547                 if (result != KERN_SUCCESS) {
548                         *errorp = result;
549                         return (NULL);
550                 }
551         }
552
553         /*
554          * Now we have the actual (object, pindex), fault in the page.  If
555          * vm_fault_object() fails it will unlock and deallocate the FS
556          * data.   If it succeeds everything remains locked and fs->object
557          * will have an additinal PIP count if it is not equal to
558          * fs->first_object
559          */
560         result = vm_fault_object(&fs, first_pindex, fault_type);
561
562         if (result == KERN_TRY_AGAIN)
563                 goto RetryFault;
564         if (result != KERN_SUCCESS) {
565                 *errorp = result;
566                 return(NULL);
567         }
568
569         if ((orig_fault_type & VM_PROT_WRITE) &&
570             (fs.prot & VM_PROT_WRITE) == 0) {
571                 *errorp = KERN_PROTECTION_FAILURE;
572                 unlock_and_deallocate(&fs);
573                 return(NULL);
574         }
575
576         /*
577          * On success vm_fault_object() does not unlock or deallocate, and fs.m
578          * will contain a busied page.
579          */
580         unlock_things(&fs);
581
582         /*
583          * Return a held page.  We are not doing any pmap manipulation so do
584          * not set PG_MAPPED.  However, adjust the page flags according to
585          * the fault type because the caller may not use a managed pmapping
586          * (so we don't want to lose the fact that the page will be dirtied
587          * if a write fault was specified).
588          */
589         vm_page_hold(fs.m);
590         vm_page_flag_clear(fs.m, PG_ZERO);
591         if (fault_type & VM_PROT_WRITE)
592                 vm_page_dirty(fs.m);
593
594         /*
595          * Update the pmap.  We really only have to do this if a COW
596          * occured to replace the read-only page with the new page.  For
597          * now just do it unconditionally. XXX
598          */
599         pmap_enter(fs.map->pmap, vaddr, fs.m, fs.prot, fs.wired);
600         vm_page_flag_set(fs.m, PG_REFERENCED);
601
602         /*
603          * Unbusy the page by activating it.  It remains held and will not
604          * be reclaimed.
605          */
606         vm_page_activate(fs.m);
607
608         if (curthread->td_lwp) {
609                 if (fs.hardfault) {
610                         curthread->td_lwp->lwp_ru.ru_majflt++;
611                 } else {
612                         curthread->td_lwp->lwp_ru.ru_minflt++;
613                 }
614         }
615
616         /*
617          * Unlock everything, and return the held page.
618          */
619         vm_page_wakeup(fs.m);
620         vm_object_deallocate(fs.first_object);
621
622         *errorp = 0;
623         return(fs.m);
624 }
625
626 /*
627  * Fault in the specified (object,offset), dirty the returned page as
628  * needed.  If the requested fault_type cannot be satisfied, NULL is
629  * returned and an error is set in *errorp.
630  */
631 vm_page_t
632 vm_fault_object_page(vm_object_t object, vm_ooffset_t offset,
633                      vm_prot_t fault_type, int fault_flags, int *errorp)
634 {
635         int result;
636         vm_pindex_t first_pindex;
637         struct faultstate fs;
638         struct vm_map_entry entry;
639
640         bzero(&entry, sizeof(entry));
641         entry.object.vm_object = object;
642         entry.maptype = VM_MAPTYPE_NORMAL;
643         entry.protection = entry.max_protection = fault_type;
644
645         fs.didlimit = 0;
646         fs.hardfault = 0;
647         fs.fault_flags = fault_flags;
648         fs.map = NULL;
649         KKASSERT((fault_flags & VM_FAULT_WIRE_MASK) == 0);
650
651 RetryFault:
652         
653         fs.first_object = object;
654         first_pindex = OFF_TO_IDX(offset);
655         fs.entry = &entry;
656         fs.first_prot = fault_type;
657         fs.wired = 0;
658         /*fs.map_generation = 0; unused */
659
660         /*
661          * Make a reference to this object to prevent its disposal while we
662          * are messing with it.  Once we have the reference, the map is free
663          * to be diddled.  Since objects reference their shadows (and copies),
664          * they will stay around as well.
665          *
666          * Bump the paging-in-progress count to prevent size changes (e.g.
667          * truncation operations) during I/O.  This must be done after
668          * obtaining the vnode lock in order to avoid possible deadlocks.
669          */
670         vm_object_reference(fs.first_object);
671         fs.vp = vnode_pager_lock(fs.first_object);
672         vm_object_pip_add(fs.first_object, 1);
673
674         fs.lookup_still_valid = TRUE;
675         fs.first_m = NULL;
676         fs.object = fs.first_object;    /* so unlock_and_deallocate works */
677
678 #if 0
679         /* XXX future - ability to operate on VM object using vpagetable */
680         if (fs.entry->maptype == VM_MAPTYPE_VPAGETABLE) {
681                 result = vm_fault_vpagetable(&fs, &first_pindex,
682                                              fs.entry->aux.master_pde,
683                                              fault_type);
684                 if (result == KERN_TRY_AGAIN)
685                         goto RetryFault;
686                 if (result != KERN_SUCCESS) {
687                         *errorp = result;
688                         return (NULL);
689                 }
690         }
691 #endif
692
693         /*
694          * Now we have the actual (object, pindex), fault in the page.  If
695          * vm_fault_object() fails it will unlock and deallocate the FS
696          * data.   If it succeeds everything remains locked and fs->object
697  * will have an additional PIP count if it is not equal to
698  * fs->first_object.
699          */
700         result = vm_fault_object(&fs, first_pindex, fault_type);
701
702         if (result == KERN_TRY_AGAIN)
703                 goto RetryFault;
704         if (result != KERN_SUCCESS) {
705                 *errorp = result;
706                 return(NULL);
707         }
708
709         if ((fault_type & VM_PROT_WRITE) && (fs.prot & VM_PROT_WRITE) == 0) {
710                 *errorp = KERN_PROTECTION_FAILURE;
711                 unlock_and_deallocate(&fs);
712                 return(NULL);
713         }
714
715         /*
716          * On success vm_fault_object() does not unlock or deallocate, and fs.m
717          * will contain a busied page.
718          */
719         unlock_things(&fs);
720
721         /*
722          * Return a held page.  We are not doing any pmap manipulation so do
723          * not set PG_MAPPED.  However, adjust the page flags according to
724          * the fault type because the caller may not use a managed pmapping
725          * (so we don't want to lose the fact that the page will be dirtied
726          * if a write fault was specified).
727          */
728         vm_page_hold(fs.m);
729         vm_page_flag_clear(fs.m, PG_ZERO);
730         if (fault_type & VM_PROT_WRITE)
731                 vm_page_dirty(fs.m);
732
733         /*
734          * Indicate that the page was accessed.
735          */
736         vm_page_flag_set(fs.m, PG_REFERENCED);
737
738         /*
739          * Unbusy the page by activating it.  It remains held and will not
740          * be reclaimed.
741          */
742         vm_page_activate(fs.m);
743
744         if (curthread->td_lwp) {
745                 if (fs.hardfault) {
746                         mycpu->gd_cnt.v_vm_faults++;
747                         curthread->td_lwp->lwp_ru.ru_majflt++;
748                 } else {
749                         curthread->td_lwp->lwp_ru.ru_minflt++;
750                 }
751         }
752
753         /*
754          * Unlock everything, and return the held page.
755          */
756         vm_page_wakeup(fs.m);
757         vm_object_deallocate(fs.first_object);
758
759         *errorp = 0;
760         return(fs.m);
761 }
762
763 /*
764  * Translate the virtual page number (first_pindex) that is relative
765  * to the address space into a logical page number that is relative to the
766  * backing object.  Use the virtual page table pointed to by (vpte).
767  *
768  * This implements an N-level page table.  Any level can terminate the
769  * scan by setting VPTE_PS.   A linear mapping is accomplished by setting
770  * VPTE_PS in the master page directory entry set via mcontrol(MADV_SETMAP).
771  */
772 static
773 int
774 vm_fault_vpagetable(struct faultstate *fs, vm_pindex_t *pindex,
775                     vpte_t vpte, int fault_type)
776 {
777         struct sf_buf *sf;
778         int vshift = 32 - PAGE_SHIFT;   /* page index bits remaining */
779         int result = KERN_SUCCESS;
780         vpte_t *ptep;
781
782         for (;;) {
783                 /*
784                  * We cannot proceed if the vpte is not valid, not readable
785                  * for a read fault, or not writable for a write fault.
786                  */
787                 if ((vpte & VPTE_V) == 0) {
788                         unlock_and_deallocate(fs);
789                         return (KERN_FAILURE);
790                 }
791                 if ((fault_type & VM_PROT_READ) && (vpte & VPTE_R) == 0) {
792                         unlock_and_deallocate(fs);
793                         return (KERN_FAILURE);
794                 }
795                 if ((fault_type & VM_PROT_WRITE) && (vpte & VPTE_W) == 0) {
796                         unlock_and_deallocate(fs);
797                         return (KERN_FAILURE);
798                 }
799                 if ((vpte & VPTE_PS) || vshift == 0)
800                         break;
801                 KKASSERT(vshift >= VPTE_PAGE_BITS);
802
803                 /*
804                  * Get the page table page.  Nominally we only read the page
805                  * table, but since we are actively setting VPTE_M and VPTE_A,
806                  * tell vm_fault_object() that we are writing it. 
807                  *
808                  * There is currently no real need to optimize this.
809                  */
810                 result = vm_fault_object(fs, vpte >> PAGE_SHIFT,
811                                          VM_PROT_READ|VM_PROT_WRITE);
812                 if (result != KERN_SUCCESS)
813                         return (result);
814
815                 /*
816                  * Process the returned fs.m and look up the page table
817                  * entry in the page table page.
818                  */
819                 vshift -= VPTE_PAGE_BITS;
820                 sf = sf_buf_alloc(fs->m, SFB_CPUPRIVATE);
821                 ptep = ((vpte_t *)sf_buf_kva(sf) +
822                         ((*pindex >> vshift) & VPTE_PAGE_MASK));
823                 vpte = *ptep;
824
825                 /*
826                  * Page table write-back.  If the vpte is valid for the
827                  * requested operation, do a write-back to the page table.
828                  *
829                  * XXX VPTE_M is not set properly for page directory pages.
830                  * It doesn't get set in the page directory if the page table
831                  * is modified during a read access.
832                  */
833                 if ((fault_type & VM_PROT_WRITE) && (vpte & VPTE_V) &&
834                     (vpte & VPTE_W)) {
835                         if ((vpte & (VPTE_M|VPTE_A)) != (VPTE_M|VPTE_A)) {
836                                 atomic_set_int(ptep, VPTE_M|VPTE_A);
837                                 vm_page_dirty(fs->m);
838                         }
839                 }
840                 if ((fault_type & VM_PROT_READ) && (vpte & VPTE_V) &&
841                     (vpte & VPTE_R)) {
842                         if ((vpte & VPTE_A) == 0) {
843                                 atomic_set_int(ptep, VPTE_A);
844                                 vm_page_dirty(fs->m);
845                         }
846                 }
847                 sf_buf_free(sf);
848                 vm_page_flag_set(fs->m, PG_REFERENCED);
849                 vm_page_activate(fs->m);
850                 vm_page_wakeup(fs->m);
851                 cleanup_successful_fault(fs);
852         }
853         /*
854          * Combine remaining address bits with the vpte.
855          */
856         *pindex = (vpte >> PAGE_SHIFT) +
857                   (*pindex & ((1 << vshift) - 1));
858         return (KERN_SUCCESS);
859 }
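/*
 * Worked example (illustrative, assuming 4KB pages, i.e. PAGE_SHIFT 12):
 * vshift starts at 32 - 12 = 20 index bits.  Each level that does not
 * terminate with VPTE_PS consumes VPTE_PAGE_BITS of those bits to select
 * a vpte from the page table page, and the final translation is
 *
 *	*pindex = (vpte >> PAGE_SHIFT) + (*pindex & ((1 << vshift) - 1))
 *
 * i.e. the page named by the terminal vpte plus whatever low index bits
 * were not consumed (all remaining bits when VPTE_PS terminates the scan
 * early, none when the walk uses up every level).
 */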
860
861
862 /*
863  * Do all operations required to fault-in (fs.first_object, pindex).  Run
864  * through the shadow chain as necessary and do required COW or virtual
865  * copy operations.  The caller has already fully resolved the vm_map_entry
866  * and, if appropriate, has created a copy-on-write layer.  All we need to
867  * do is iterate the object chain.
868  *
869  * On failure (fs) is unlocked and deallocated and the caller may return or
870  * retry depending on the failure code.  On success (fs) is NOT unlocked or
871  * deallocated, fs.m will contain a resolved, busied page, and fs.object
872  * will have an additional PIP count if it is not equal to fs.first_object.
873  */
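/*
 * Shadow chain sketch (illustrative): for a private (COW) file mapping
 * the chain typically looks like
 *
 *	fs.first_object (anonymous shadow, initially empty)
 *	    backing_object -> vnode object holding the file's pages
 *
 * A read fault can be satisfied directly from the backing object; a
 * write fault copies (or, when the backing object has no other
 * references, virtually moves) the page up into first_object, which is
 * the COW step handled near the end of this function.
 */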
874 static
875 int
876 vm_fault_object(struct faultstate *fs,
877                 vm_pindex_t first_pindex, vm_prot_t fault_type)
878 {
879         vm_object_t next_object;
880         vm_page_t marray[VM_FAULT_READ];
881         vm_pindex_t pindex;
882         int faultcount;
883
884         fs->prot = fs->first_prot;
885         fs->object = fs->first_object;
886         pindex = first_pindex;
887
888         /* 
889          * If a read fault occurs we try to make the page writable if
890          * possible.  There are three cases where we cannot make the
891          * page mapping writable:
892          *
893          * (1) The mapping is read-only or the VM object is read-only,
894          *     fs->prot above will simply not have VM_PROT_WRITE set.
895          *
896          * (2) If the mapping is a virtual page table we need to be able
897          *     to detect writes so we can set VPTE_M in the virtual page
898          *     table.
899          *
900          * (3) If the VM page is read-only or copy-on-write, upgrading would
901          *     just result in an unnecessary COW fault.
902          *
903          * VM_PROT_VPAGED is set if faulting via a virtual page table and
904          * causes adjustments to the 'M'odify bit to also turn off write
905          * access to force a re-fault.
906          */
907         if (fs->entry->maptype == VM_MAPTYPE_VPAGETABLE) {
908                 if ((fault_type & VM_PROT_WRITE) == 0)
909                         fs->prot &= ~VM_PROT_WRITE;
910         }
911
912         for (;;) {
913                 /*
914                  * If the object is dead, we stop here
915                  */
916                 if (fs->object->flags & OBJ_DEAD) {
917                         unlock_and_deallocate(fs);
918                         return (KERN_PROTECTION_FAILURE);
919                 }
920
921                 /*
922                  * See if page is resident.  spl protection is required
923                  * to avoid an interrupt unbusy/free race against our
924                  * lookup.  We must hold the protection through a page
925                  * allocation or busy.
926                  */
927                 crit_enter();
928                 fs->m = vm_page_lookup(fs->object, pindex);
929                 if (fs->m != NULL) {
930                         int queue;
931                         /*
932                          * Wait/Retry if the page is busy.  We have to do this
933                          * if the page is busy via either PG_BUSY or 
934                          * vm_page_t->busy because the vm_pager may be using
935                          * vm_page_t->busy for pageouts ( and even pageins if
936                          * it is the vnode pager ), and we could end up trying
937                          * to pagein and pageout the same page simultaneously.
938                          *
939                          * We can theoretically allow the busy case on a read
940                          * fault if the page is marked valid, but since such
941                          * pages are typically already pmap'd, putting that
942                          * special case in might be more effort than it is
943                          * worth.  We cannot under any circumstances mess
944                          * around with a vm_page_t->busy page except, perhaps,
945                          * to pmap it.
946                          */
947                         if ((fs->m->flags & PG_BUSY) || fs->m->busy) {
948                                 unlock_things(fs);
949                                 vm_page_sleep_busy(fs->m, TRUE, "vmpfw");
950                                 mycpu->gd_cnt.v_intrans++;
951                                 vm_object_deallocate(fs->first_object);
952                                 fs->first_object = NULL;
953                                 crit_exit();
954                                 return (KERN_TRY_AGAIN);
955                         }
956
957                         /*
958                          * If reactivating a page from PQ_CACHE we may have
959                          * to rate-limit.
960                          */
961                         queue = fs->m->queue;
962                         vm_page_unqueue_nowakeup(fs->m);
963
964                         if ((queue - fs->m->pc) == PQ_CACHE && 
965                             vm_page_count_severe()) {
966                                 vm_page_activate(fs->m);
967                                 unlock_and_deallocate(fs);
968                                 vm_waitpfault();
969                                 crit_exit();
970                                 return (KERN_TRY_AGAIN);
971                         }
972
973                         /*
974                          * Mark page busy for other processes, and the 
975                          * pagedaemon.  If it still isn't completely valid
976                          * (readable), or if a read-ahead-mark is set on
977                          * the VM page, jump to readrest, else we found the
978                          * page and can return.
979                          *
980                          * We can release the spl once we have marked the
981                          * page busy.
982                          */
983                         vm_page_busy(fs->m);
984                         crit_exit();
985
986                         if (fs->m->object != &kernel_object) {
987                                 if ((fs->m->valid & VM_PAGE_BITS_ALL) !=
988                                     VM_PAGE_BITS_ALL) {
989                                         goto readrest;
990                                 }
991                                 if (fs->m->flags & PG_RAM) {
992                                         if (debug_cluster)
993                                                 kprintf("R");
994                                         vm_page_flag_clear(fs->m, PG_RAM);
995                                         goto readrest;
996                                 }
997                         }
998                         break; /* break to PAGE HAS BEEN FOUND */
999                 }
1000
1001                 /*
1002                  * Page is not resident.  If this is the search termination
1003                  * or the pager might contain the page, allocate a new page.
1004                  *
1005                  * NOTE: We are still in a critical section.
1006                  */
1007                 if (TRYPAGER(fs) || fs->object == fs->first_object) {
1008                         /*
1009                          * If the page is beyond the object size we fail
1010                          */
1011                         if (pindex >= fs->object->size) {
1012                                 crit_exit();
1013                                 unlock_and_deallocate(fs);
1014                                 return (KERN_PROTECTION_FAILURE);
1015                         }
1016
1017                         /*
1018                          * Ratelimit.
1019                          */
1020                         if (fs->didlimit == 0 && curproc != NULL) {
1021                                 int limticks;
1022
1023                                 limticks = vm_fault_ratelimit(curproc->p_vmspace);
1024                                 if (limticks) {
1025                                         crit_exit();
1026                                         unlock_and_deallocate(fs);
1027                                         tsleep(curproc, 0, "vmrate", limticks);
1028                                         fs->didlimit = 1;
1029                                         return (KERN_TRY_AGAIN);
1030                                 }
1031                         }
1032
1033                         /*
1034                          * Allocate a new page for this object/offset pair.
1035                          */
1036                         fs->m = NULL;
1037                         if (!vm_page_count_severe()) {
1038                                 fs->m = vm_page_alloc(fs->object, pindex,
1039                                     (fs->vp || fs->object->backing_object) ? VM_ALLOC_NORMAL : VM_ALLOC_NORMAL | VM_ALLOC_ZERO);
1040                         }
1041                         if (fs->m == NULL) {
1042                                 crit_exit();
1043                                 unlock_and_deallocate(fs);
1044                                 vm_waitpfault();
1045                                 return (KERN_TRY_AGAIN);
1046                         }
1047                 }
1048                 crit_exit();
1049
1050 readrest:
1051                 /*
1052                  * We have found a valid page or we have allocated a new page.
1053                  * The page thus may not be valid or may not be entirely 
1054                  * valid.  Even if entirely valid we may have hit a read-ahead
1055                  * mark and desire to keep the pipeline going.
1056                  *
1057                  * Attempt to fault-in the page if there is a chance that the
1058                  * pager has it, and potentially fault in additional pages
1059                  * at the same time.
1060                  *
1061                  * We are NOT in splvm here and if TRYPAGER is true then
1062                  * fs.m will be non-NULL and will be PG_BUSY for us.
1063                  */
1064
1065                 if (TRYPAGER(fs)) {
1066                         int rv;
1067                         int reqpage;
1068                         int ahead, behind;
1069                         u_char behavior = vm_map_entry_behavior(fs->entry);
1070
1071                         if (behavior == MAP_ENTRY_BEHAV_RANDOM) {
1072                                 ahead = 0;
1073                                 behind = 0;
1074                         } else {
1075                                 behind = pindex;
1076                                 KKASSERT(behind >= 0);
1077                                 if (behind > VM_FAULT_READ_BEHIND)
1078                                         behind = VM_FAULT_READ_BEHIND;
1079
1080                                 ahead = fs->object->size - pindex;
1081                                 if (ahead < 1)
1082                                         ahead = 1;
1083                                 if (ahead > VM_FAULT_READ_AHEAD)
1084                                         ahead = VM_FAULT_READ_AHEAD;
1085                         }
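                        /*
                         * Illustrative numbers (not from the original code):
                         * with the defaults above, a fault at pindex 100 in a
                         * large object clamps to behind = 7 and ahead = 8,
                         * i.e. the 16 page window described by VM_FAULT_READ.
                         */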
1086
1087                         if ((fs->first_object->type != OBJT_DEVICE) &&
1088                             (behavior == MAP_ENTRY_BEHAV_SEQUENTIAL ||
1089                                 (behavior != MAP_ENTRY_BEHAV_RANDOM &&
1090                                 pindex >= fs->entry->lastr &&
1091                                 pindex < fs->entry->lastr + VM_FAULT_READ))
1092                         ) {
1093                                 vm_pindex_t firstpindex, tmppindex;
1094
1095                                 if (first_pindex < 2 * VM_FAULT_READ)
1096                                         firstpindex = 0;
1097                                 else
1098                                         firstpindex = first_pindex - 2 * VM_FAULT_READ;
1099
1100                                 /*
1101                                  * note: partially valid pages cannot be 
1102                                  * included in the lookahead - NFS piecemeal
1103                                  * writes will barf on it badly.
1104                                  *
1105                                  * spl protection is required to avoid races
1106                                  * between the lookup and an interrupt
1107                                  * unbusy/free sequence occurring prior to
1108                                  * our busy check.
1109                                  */
1110                                 crit_enter();
1111                                 for (tmppindex = first_pindex - 1;
1112                                     tmppindex >= firstpindex;
1113                                     --tmppindex
1114                                 ) {
1115                                         vm_page_t mt;
1116
1117                                         mt = vm_page_lookup(fs->first_object, tmppindex);
1118                                         if (mt == NULL || (mt->valid != VM_PAGE_BITS_ALL))
1119                                                 break;
1120                                         if (mt->busy ||
1121                                                 (mt->flags & (PG_BUSY | PG_FICTITIOUS | PG_UNMANAGED)) ||
1122                                                 mt->hold_count ||
1123                                                 mt->wire_count) 
1124                                                 continue;
1125                                         if (mt->dirty == 0)
1126                                                 vm_page_test_dirty(mt);
1127                                         if (mt->dirty) {
1128                                                 vm_page_busy(mt);
1129                                                 vm_page_protect(mt, VM_PROT_NONE);
1130                                                 vm_page_deactivate(mt);
1131                                                 vm_page_wakeup(mt);
1132                                         } else {
1133                                                 vm_page_cache(mt);
1134                                         }
1135                                 }
1136                                 crit_exit();
1137
1138                                 ahead += behind;
1139                                 behind = 0;
1140                         }
1141
1142                         /*
1143                          * Now we find out if any other pages should be paged
1144                          * in at this time.  This routine checks to see if the
1145                          * pages surrounding this fault reside in the same
1146                          * object as the page for this fault.  If they do,
1147                          * they are also faulted into that object.  The
1148                          * returned array "marray" contains vm_page_t structs,
1149                          * one of which is the vm_page_t passed to the
1150                          * routine.  The reqpage return value is the index
1151                          * into marray of the vm_page_t passed to the
1152                          * routine.
1153                          *
1154                          * fs.m plus the additional pages are PG_BUSY'd.
1155                          */
1156                         faultcount = vm_fault_additional_pages(
1157                             fs->m, behind, ahead, marray, &reqpage);
1158
1159                         /*
1160                          * update lastr imperfectly (we do not know how much
1161                          * getpages will actually read), but good enough.
1162                          */
1163                         fs->entry->lastr = pindex + faultcount - behind;
1164
1165                         /*
1166                          * Call the pager to retrieve the data, if any, after
1167                          * releasing the lock on the map.  We hold a ref on
1168                          * fs.object and the pages are PG_BUSY'd.
1169                          */
1170                         unlock_map(fs);
1171
1172                         if (faultcount) {
1173                                 rv = vm_pager_get_pages(fs->object, marray, 
1174                                                         faultcount, reqpage);
1175                         } else {
1176                                 rv = VM_PAGER_FAIL;
1177                         }
1178
1179                         if (rv == VM_PAGER_OK) {
1180                                 /*
1181                                  * Found the page. Leave it busy while we play
1182                                  * with it.
1183                                  */
1184
1185                                 /*
1186                                  * Relookup in case pager changed page. Pager
1187                                  * is responsible for disposition of old page
1188                                  * if moved.
1189                                  *
1190                                  * XXX other code segments do relookups too.
1191                                  * It's a bad abstraction that needs to be
1192                                  * fixed/removed.
1193                                  */
1194                                 fs->m = vm_page_lookup(fs->object, pindex);
1195                                 if (fs->m == NULL) {
1196                                         unlock_and_deallocate(fs);
1197                                         return (KERN_TRY_AGAIN);
1198                                 }
1199
1200                                 ++fs->hardfault;
1201                                 break; /* break to PAGE HAS BEEN FOUND */
1202                         }
1203
1204                         /*
1205                          * Remove the bogus page (which does not exist at this
1206                          * object/offset); before doing so, we must get back
1207                          * our object lock to preserve our invariant.
1208                          *
1209                          * Also wake up any other process that may want to bring
1210                          * in this page.
1211                          *
1212                          * If this is the top-level object, we must leave the
1213                          * busy page to prevent another process from rushing
1214                          * past us, and inserting the page in that object at
1215                          * the same time that we are.
1216                          */
1217                         if (rv == VM_PAGER_ERROR) {
1218                                 if (curproc)
1219                                         kprintf("vm_fault: pager read error, pid %d (%s)\n", curproc->p_pid, curproc->p_comm);
1220                                 else
1221                                         kprintf("vm_fault: pager read error, thread %p (%s)\n", curthread, curthread->td_comm);
1222                         }
1223                         /*
1224                          * Data outside the range of the pager or an I/O error
1225                          *
1226                          * The page may have been wired during the pagein,
1227                          * e.g. by the buffer cache, and cannot simply be
1228                          * freed.  Call vnode_pager_freepage() to deal with it.
1229                          */
1230                         /*
1231                          * XXX - the check for kernel_map is a kludge to work
1232                          * around having the machine panic on a kernel space
1233                          * fault w/ I/O error.
1234                          */
1235                         if (((fs->map != &kernel_map) && (rv == VM_PAGER_ERROR)) ||
1236                                 (rv == VM_PAGER_BAD)) {
1237                                 vnode_pager_freepage(fs->m);
1238                                 fs->m = NULL;
1239                                 unlock_and_deallocate(fs);
1240                                 if (rv == VM_PAGER_ERROR)
1241                                         return (KERN_FAILURE);
1242                                 else
1243                                         return (KERN_PROTECTION_FAILURE);
1244                                 /* NOT REACHED */
1245                         }
1246                         if (fs->object != fs->first_object) {
1247                                 vnode_pager_freepage(fs->m);
1248                                 fs->m = NULL;
1249                                 /*
1250                                  * XXX - we cannot just fall out at this
1251                                  * point, m has been freed and is invalid!
1252                                  */
1253                         }
1254                 }
1255
1256                 /*
1257                  * We get here if the object has a default pager (or unwiring) 
1258                  * or the pager doesn't have the page.
1259                  */
1260                 if (fs->object == fs->first_object)
1261                         fs->first_m = fs->m;
1262
1263                 /*
1264                  * Move on to the next object.  Lock the next object before
1265                  * unlocking the current one.
1266                  */
1267                 pindex += OFF_TO_IDX(fs->object->backing_object_offset);
1268                 next_object = fs->object->backing_object;
1269                 if (next_object == NULL) {
1270                         /*
1271                          * If there's no object left, fill the page in the top
1272                          * object with zeros.
1273                          */
1274                         if (fs->object != fs->first_object) {
1275                                 vm_object_pip_wakeup(fs->object);
1276
1277                                 fs->object = fs->first_object;
1278                                 pindex = first_pindex;
1279                                 fs->m = fs->first_m;
1280                         }
1281                         fs->first_m = NULL;
1282
1283                         /*
1284                          * Zero the page if necessary and mark it valid.
1285                          */
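                             /*
                              * PG_ZERO means the page was handed to us already
                              * zeroed (pre-zeroed by the idle-time zeroing
                              * code), so the explicit zero fill can be skipped.
                              * v_ozfod counts those optimized zero fills;
                              * v_zfod counts all zero-fill faults.
                              */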
1286                         if ((fs->m->flags & PG_ZERO) == 0) {
1287                                 vm_page_zero_fill(fs->m);
1288                         } else {
1289                                 mycpu->gd_cnt.v_ozfod++;
1290                         }
1291                         mycpu->gd_cnt.v_zfod++;
1292                         fs->m->valid = VM_PAGE_BITS_ALL;
1293                         break;  /* break to PAGE HAS BEEN FOUND */
1294                 } else {
1295                         if (fs->object != fs->first_object) {
1296                                 vm_object_pip_wakeup(fs->object);
1297                         }
1298                         KASSERT(fs->object != next_object, ("object loop %p", next_object));
1299                         fs->object = next_object;
1300                         vm_object_pip_add(fs->object, 1);
1301                 }
1302         }
1303
1304         KASSERT((fs->m->flags & PG_BUSY) != 0,
1305                 ("vm_fault: not busy after main loop"));
1306
1307         /*
1308          * PAGE HAS BEEN FOUND. [Loop invariant still holds -- the object lock
1309          * is held.]
1310          */
1311
1312         /*
1313          * If the page is being written, but isn't already owned by the
1314          * top-level object, we have to copy it into a new page owned by the
1315          * top-level object.
1316          */
1317         if (fs->object != fs->first_object) {
1318                 /*
1319                  * We only really need to copy if we want to write it.
1320                  */
1321                 if (fault_type & VM_PROT_WRITE) {
1322                         /*
1323                          * This allows pages to be virtually copied from a 
1324                          * backing_object into the first_object, where the 
1325                          * backing object has no other refs to it, and cannot
1326                          * gain any more refs.  Instead of a bcopy, we just 
1327                          * move the page from the backing object to the 
1328                          * first object.  Note that we must mark the page 
1329                          * dirty in the first object so that it will go out 
1330                          * to swap when needed.
1331                          */
1332                         if (
1333                                 /*
1334                                  * Map, if present, has not changed
1335                                  */
1336                                 (fs->map == NULL ||
1337                                 fs->map_generation == fs->map->timestamp) &&
1338                                 /*
1339                                  * Only one shadow object
1340                                  */
1341                                 (fs->object->shadow_count == 1) &&
1342                                 /*
1343                                  * No COW refs, except us
1344                                  */
1345                                 (fs->object->ref_count == 1) &&
1346                                 /*
1347                                  * No one else can look this object up
1348                                  */
1349                                 (fs->object->handle == NULL) &&
1350                                 /*
1351                                  * No other ways to look the object up
1352                                  */
1353                                 ((fs->object->type == OBJT_DEFAULT) ||
1354                                  (fs->object->type == OBJT_SWAP)) &&
1355                                 /*
1356                                  * We don't chase down the shadow chain
1357                                  */
1358                                 (fs->object == fs->first_object->backing_object) &&
1359
1360                                 /*
1361                                  * grab the lock if we need to
1362                                  */
1363                                 (fs->lookup_still_valid ||
1364                                  fs->map == NULL ||
1365                                  lockmgr(&fs->map->lock, LK_EXCLUSIVE|LK_NOWAIT) == 0)
1366                             ) {
1367                                 
1368                                 fs->lookup_still_valid = 1;
1369                                 /*
1370                                  * get rid of the unnecessary page
1371                                  */
1372                                 vm_page_protect(fs->first_m, VM_PROT_NONE);
1373                                 vm_page_free(fs->first_m);
1374                                 fs->first_m = NULL;
1375
1376                                 /*
1377                                  * grab the page and put it into the 
1378                                  * process's object.  The page is
1379                                  * automatically made dirty.
1380                                  */
1381                                 vm_page_rename(fs->m, fs->first_object, first_pindex);
1382                                 fs->first_m = fs->m;
1383                                 vm_page_busy(fs->first_m);
1384                                 fs->m = NULL;
1385                                 mycpu->gd_cnt.v_cow_optim++;
1386                         } else {
1387                                 /*
1388                                  * Oh well, let's copy it.
1389                                  */
1390                                 vm_page_copy(fs->m, fs->first_m);
1391                                 vm_page_event(fs->m, VMEVENT_COW);
1392                         }
1393
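                             /*
                              * In the optimized (rename) path fs->m was set to
                              * NULL above, so the release below only happens
                              * when we fell back to the physical copy and the
                              * backing object's page is no longer needed.
                              */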
1394                         if (fs->m) {
1395                                 /*
1396                                  * We no longer need the old page or object.
1397                                  */
1398                                 release_page(fs);
1399                         }
1400
1401                         /*
1402                          * fs->object != fs->first_object due to above 
1403                          * conditional
1404                          */
1405                         vm_object_pip_wakeup(fs->object);
1406
1407                         /*
1408                          * Only use the new page below...
1409                          */
1410
1411                         mycpu->gd_cnt.v_cow_faults++;
1412                         fs->m = fs->first_m;
1413                         fs->object = fs->first_object;
1414                         pindex = first_pindex;
1415                 } else {
1416                         /*
1417                          * If it wasn't a write fault avoid having to copy
1418                          * the page by mapping it read-only.
1419                          */
1420                         fs->prot &= ~VM_PROT_WRITE;
1421                 }
1422         }
1423
1424         /*
1425          * We may have had to unlock a map to do I/O.  If we did then
1426          * lookup_still_valid will be FALSE.  If the map generation count
1427          * also changed then all sorts of things could have happened while
1428          * we were doing the I/O and we need to retry.
1429          */
1430
1431         if (!fs->lookup_still_valid &&
1432             fs->map != NULL &&
1433             (fs->map->timestamp != fs->map_generation)) {
1434                 release_page(fs);
1435                 unlock_and_deallocate(fs);
1436                 return (KERN_TRY_AGAIN);
1437         }
1438
1439         /*
1440          * If the fault is a write, we know that this page is being
1441          * written NOW so dirty it explicitly to save on pmap_is_modified()
1442          * calls later.
1443          *
1444          * If this is a NOSYNC mmap we do not want to set PG_NOSYNC
1445          * if the page is already dirty to prevent data written with
1446          * the expectation of being synced from not being synced.
1447          * Likewise if this entry does not request NOSYNC then make
1448          * sure the page isn't marked NOSYNC.  Applications sharing
1449          * data should use the same flags to avoid ping ponging.
1450          *
1451          * Also tell the backing pager, if any, that it should remove
1452          * any swap backing since the page is now dirty.
1453          */
1454         if (fs->prot & VM_PROT_WRITE) {
1455                 vm_object_set_writeable_dirty(fs->m->object);
1456                 if (fs->entry->eflags & MAP_ENTRY_NOSYNC) {
1457                         if (fs->m->dirty == 0)
1458                                 vm_page_flag_set(fs->m, PG_NOSYNC);
1459                 } else {
1460                         vm_page_flag_clear(fs->m, PG_NOSYNC);
1461                 }
1462                 if (fs->fault_flags & VM_FAULT_DIRTY) {
1463                         crit_enter();
1464                         vm_page_dirty(fs->m);
1465                         vm_pager_page_unswapped(fs->m);
1466                         crit_exit();
1467                 }
1468         }
1469
1470         /*
1471          * Page had better still be busy.  We are still locked up and 
1472          * fs->object will have another PIP reference if it is not equal
1473          * to fs->first_object.
1474          */
1475         KASSERT(fs->m->flags & PG_BUSY,
1476                 ("vm_fault: page %p not busy!", fs->m));
1477
1478         /*
1479          * Sanity check: page must be completely valid or it is not fit to
1480          * map into user space.  vm_pager_get_pages() ensures this.
1481          */
1482         if (fs->m->valid != VM_PAGE_BITS_ALL) {
1483                 vm_page_zero_invalid(fs->m, TRUE);
1484                 kprintf("Warning: page %p partially invalid on fault\n", fs->m);
1485         }
1486
1487         return (KERN_SUCCESS);
1488 }
1489
1490 /*
1491  * Wire down a range of virtual addresses in a map.  The entry in question
1492  * should be marked in-transition and the map must be locked.  We must
1493  * release the map temporarily while faulting-in the page to avoid a
1494  * deadlock.  Note that the entry may be clipped while we are blocked but
1495  * will never be freed.
1496  */
1497 int
1498 vm_fault_wire(vm_map_t map, vm_map_entry_t entry, boolean_t user_wire)
1499 {
1500         boolean_t fictitious;
1501         vm_offset_t start;
1502         vm_offset_t end;
1503         vm_offset_t va;
1504         vm_paddr_t pa;
1505         pmap_t pmap;
1506         int rv;
1507
1508         pmap = vm_map_pmap(map);
1509         start = entry->start;
1510         end = entry->end;
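             /*
              * Device-backed (OBJT_DEVICE) mappings use fictitious pages
              * that are not managed by the normal page queues, so they
              * must not be passed to vm_page_unwire() in the error path
              * below.
              */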
1511         fictitious = entry->object.vm_object &&
1512                         (entry->object.vm_object->type == OBJT_DEVICE);
1513
1514         vm_map_unlock(map);
1515         map->timestamp++;
1516
1517         /*
1518          * We simulate a fault to get the page and enter it in the physical
1519          * map.
1520          */
1521         for (va = start; va < end; va += PAGE_SIZE) {
1522                 if (user_wire) {
1523                         rv = vm_fault(map, va, VM_PROT_READ, 
1524                                         VM_FAULT_USER_WIRE);
1525                 } else {
1526                         rv = vm_fault(map, va, VM_PROT_READ|VM_PROT_WRITE,
1527                                         VM_FAULT_CHANGE_WIRING);
1528                 }
1529                 if (rv) {
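                             /*
                              * The fault failed partway through.  Back out any
                              * wirings already established so the range is
                              * left fully unwired before returning the error.
                              */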
1530                         while (va > start) {
1531                                 va -= PAGE_SIZE;
1532                                 if ((pa = pmap_extract(pmap, va)) == 0)
1533                                         continue;
1534                                 pmap_change_wiring(pmap, va, FALSE);
1535                                 if (!fictitious)
1536                                         vm_page_unwire(PHYS_TO_VM_PAGE(pa), 1);
1537                         }
1538                         vm_map_lock(map);
1539                         return (rv);
1540                 }
1541         }
1542         vm_map_lock(map);
1543         return (KERN_SUCCESS);
1544 }
1545
1546 /*
1547  * Unwire a range of virtual addresses in a map.  The map should be
1548  * locked.
1549  */
1550 void
1551 vm_fault_unwire(vm_map_t map, vm_map_entry_t entry)
1552 {
1553         boolean_t fictitious;
1554         vm_offset_t start;
1555         vm_offset_t end;
1556         vm_offset_t va;
1557         vm_paddr_t pa;
1558         pmap_t pmap;
1559
1560         pmap = vm_map_pmap(map);
1561         start = entry->start;
1562         end = entry->end;
1563         fictitious = entry->object.vm_object &&
1564                         (entry->object.vm_object->type == OBJT_DEVICE);
1565
1566         /*
1567          * Since the pages are wired down, we must be able to get their
1568          * mappings from the physical map system.
1569          */
1570         for (va = start; va < end; va += PAGE_SIZE) {
1571                 pa = pmap_extract(pmap, va);
1572                 if (pa != 0) {
1573                         pmap_change_wiring(pmap, va, FALSE);
1574                         if (!fictitious)
1575                                 vm_page_unwire(PHYS_TO_VM_PAGE(pa), 1);
1576                 }
1577         }
1578 }
1579
1580 /*
1581  * Reduce the rate at which memory is allocated to a process based
1582  * on the perceived load on the VM system. As the load increases
1583  * the allocation burst rate goes down and the delay increases. 
1584  *
1585  * Rate limiting does not apply when faulting active or inactive
1586  * pages.  When faulting 'cache' pages, rate limiting only applies
1587  * if the system currently has a severe page deficit.
1588  *
1589  * XXX vm_pagesupply should be increased when a page is freed.
1590  *
1591  * We sleep up to 1/10 of a second.
1592  */
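     /*
      * Illustrative example (not taken from the source): assuming vm_load
      * is scaled 0..1000, as the formulas below imply, and hz == 100, a
      * process seeing vm_load == 500 gets a burst of
      * (1000 - 500) / 10 = 50 pages and the return value asks the caller
      * to sleep 500 * hz / 10000 = 5 ticks (50ms).  At vm_load == 1000 the
      * burst drops to 0 and the sleep reaches hz / 10 ticks, the full
      * 1/10 of a second.
      */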
1593 static int
1594 vm_fault_ratelimit(struct vmspace *vmspace)
1595 {
1596         if (vm_load_enable == 0)
1597                 return(0);
1598         if (vmspace->vm_pagesupply > 0) {
1599                 --vmspace->vm_pagesupply;
1600                 return(0);
1601         }
1602 #ifdef INVARIANTS
1603         if (vm_load_debug) {
1604                 kprintf("load %-4d give %d pgs, wait %d, pid %-5d (%s)\n",
1605                         vm_load, 
1606                         (1000 - vm_load ) / 10, vm_load * hz / 10000,
1607                         curproc->p_pid, curproc->p_comm);
1608         }
1609 #endif
1610         vmspace->vm_pagesupply = (1000 - vm_load) / 10;
1611         return(vm_load * hz / 10000);
1612 }
1613
1614 /*
1615  *      Routine:
1616  *              vm_fault_copy_entry
1617  *      Function:
1618  *              Copy all of the pages from a wired-down map entry to another.
1619  *
1620  *      In/out conditions:
1621  *              The source and destination maps must be locked for write.
1622  *              The source map entry must be wired down (or be a sharing map
1623  *              entry corresponding to a main map entry that is wired down).
1624  */
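     /*
      * Note: this is presumably reached from the fork path (e.g.
      * vm_map_copy_entry()) for wired-down entries, which cannot be
      * handled with copy-on-write and must be copied up front.
      */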
1625
1626 void
1627 vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
1628     vm_map_entry_t dst_entry, vm_map_entry_t src_entry)
1629 {
1630         vm_object_t dst_object;
1631         vm_object_t src_object;
1632         vm_ooffset_t dst_offset;
1633         vm_ooffset_t src_offset;
1634         vm_prot_t prot;
1635         vm_offset_t vaddr;
1636         vm_page_t dst_m;
1637         vm_page_t src_m;
1638
1639 #ifdef  lint
1640         src_map++;
1641 #endif  /* lint */
1642
1643         src_object = src_entry->object.vm_object;
1644         src_offset = src_entry->offset;
1645
1646         /*
1647          * Create the top-level object for the destination entry. (Doesn't
1648          * actually shadow anything - we copy the pages directly.)
1649          */
1650         vm_map_entry_allocate_object(dst_entry);
1651         dst_object = dst_entry->object.vm_object;
1652
1653         prot = dst_entry->max_protection;
1654
1655         /*
1656          * Loop through all of the pages in the entry's range, copying each
1657          * one from the source object (it should be there) to the destination
1658          * object.
1659          */
1660         for (vaddr = dst_entry->start, dst_offset = 0;
1661             vaddr < dst_entry->end;
1662             vaddr += PAGE_SIZE, dst_offset += PAGE_SIZE) {
1663
1664                 /*
1665                  * Allocate a page in the destination object
1666                  */
1667                 do {
1668                         dst_m = vm_page_alloc(dst_object,
1669                                 OFF_TO_IDX(dst_offset), VM_ALLOC_NORMAL);
1670                         if (dst_m == NULL) {
1671                                 vm_wait(0);
1672                         }
1673                 } while (dst_m == NULL);
1674
1675                 /*
1676                  * Find the page in the source object, and copy it in.
1677                  * (Because the source is wired down, the page will be in
1678                  * memory.)
1679                  */
1680                 src_m = vm_page_lookup(src_object,
1681                         OFF_TO_IDX(dst_offset + src_offset));
1682                 if (src_m == NULL)
1683                         panic("vm_fault_copy_entry: page missing");
1684
1685                 vm_page_copy(src_m, dst_m);
1686                 vm_page_event(src_m, VMEVENT_COW);
1687
1688                 /*
1689                  * Enter it in the pmap...
1690                  */
1691
1692                 vm_page_flag_clear(dst_m, PG_ZERO);
1693                 pmap_enter(dst_map->pmap, vaddr, dst_m, prot, FALSE);
1694
1695                 /*
1696                  * Mark it no longer busy, and put it on the active list.
1697                  */
1698                 vm_page_activate(dst_m);
1699                 vm_page_wakeup(dst_m);
1700         }
1701 }
1702
1703
1704 /*
1705  * This routine checks around the requested page for other pages that
1706  * might be able to be faulted in.  This routine brackets the viable
1707  * pages for the pages to be paged in.
1708  *
1709  * Inputs:
1710  *      m, rbehind, rahead
1711  *
1712  * Outputs:
1713  *  marray (array of vm_page_t), reqpage (index of requested page)
1714  *
1715  * Return value:
1716  *  number of pages in marray
1717  */
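     /*
      * The caller is expected to hand marray to vm_pager_get_pages(),
      * which reads the whole cluster in one I/O and leaves the originally
      * requested page at marray[*reqpage].
      */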
1718 static int
1719 vm_fault_additional_pages(vm_page_t m, int rbehind, int rahead,
1720                           vm_page_t *marray, int *reqpage)
1721 {
1722         int i,j;
1723         vm_object_t object;
1724         vm_pindex_t pindex, startpindex, endpindex, tpindex;
1725         vm_page_t rtm;
1726         int cbehind, cahead;
1727
1728         object = m->object;
1729         pindex = m->pindex;
1730
1731         /*
1732          * we don't fault-ahead for the device pager
1733          */
1734         if (object->type == OBJT_DEVICE) {
1735                 *reqpage = 0;
1736                 marray[0] = m;
1737                 return 1;
1738         }
1739
1740         /*
1741          * if the requested page is not available, then give up now
1742          */
1743         if (!vm_pager_has_page(object, pindex, &cbehind, &cahead)) {
1744                 *reqpage = 0;   /* not used by caller, fix compiler warn */
1745                 return 0;
1746         }
1747
1748         if ((cbehind == 0) && (cahead == 0)) {
1749                 *reqpage = 0;
1750                 marray[0] = m;
1751                 return 1;
1752         }
1753
1754         if (rahead > cahead) {
1755                 rahead = cahead;
1756         }
1757
1758         if (rbehind > cbehind) {
1759                 rbehind = cbehind;
1760         }
1761
1762         /*
1763          * Do not do any readahead if we have insufficient free memory.
1764          *
1765          * XXX this code was disabled (broken) before and shows instability
1766          * now that the conditional has been fixed, so shortcut for now.
1767          */
1768         if (burst_fault == 0 || vm_page_count_severe()) {
1769                 marray[0] = m;
1770                 *reqpage = 0;
1771                 return 1;
1772         }
1773
1774         /*
1775          * scan backward for the read behind pages -- in memory 
1776          *
1777          * Assume that if the page is not found an interrupt will not
1778          * create it.  Theoretically interrupts can only remove (busy)
1779          * pages, not create new associations.
1780          */
1781         if (pindex > 0) {
1782                 if (rbehind > pindex) {
1783                         rbehind = pindex;
1784                         startpindex = 0;
1785                 } else {
1786                         startpindex = pindex - rbehind;
1787                 }
1788
1789                 crit_enter();
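                     /*
                      * Walk backward from the faulting page until we find a
                      * page that is already resident; the read-behind window
                      * starts just past it so we never allocate over an
                      * existing page.
                      */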
1790                 for (tpindex = pindex; tpindex > startpindex; --tpindex) {
1791                         if (vm_page_lookup(object, tpindex - 1))
1792                                 break;
1793                 }
1794
1795                 i = 0;
1796                 while (tpindex < pindex) {
1797                         rtm = vm_page_alloc(object, tpindex, VM_ALLOC_SYSTEM);
1798                         if (rtm == NULL) {
1799                                 crit_exit();
1800                                 for (j = 0; j < i; j++) {
1801                                         vm_page_free(marray[j]);
1802                                 }
1803                                 marray[0] = m;
1804                                 *reqpage = 0;
1805                                 return 1;
1806                         }
1807                         marray[i] = rtm;
1808                         ++i;
1809                         ++tpindex;
1810                 }
1811                 crit_exit();
1812         } else {
1813                 i = 0;
1814         }
1815
1816         /*
1817          * Assign requested page
1818          */
1819         marray[i] = m;
1820         *reqpage = i;
1821         ++i;
1822
1823         /*
1824          * Scan forwards for read-ahead pages
1825          */
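             /*
              * The scan stops at the first page that is already resident or
              * as soon as an allocation fails; whatever read-ahead pages were
              * allocated up to that point are still returned in marray.
              */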
1826         tpindex = pindex + 1;
1827         endpindex = tpindex + rahead;
1828         if (endpindex > object->size)
1829                 endpindex = object->size;
1830
1831         crit_enter();
1832         while (tpindex < endpindex) {
1833                 if (vm_page_lookup(object, tpindex))
1834                         break;
1835                 rtm = vm_page_alloc(object, tpindex, VM_ALLOC_SYSTEM);
1836                 if (rtm == NULL)
1837                         break;
1838                 marray[i] = rtm;
1839                 ++i;
1840                 ++tpindex;
1841         }
1842         crit_exit();
1843
1844         return (i);
1845 }