1 /*
2  * Copyright (c) 1991, 1993
3  *      The Regents of the University of California.  All rights reserved.
4  * Copyright (c) 1994 John S. Dyson
5  * All rights reserved.
6  * Copyright (c) 1994 David Greenman
7  * All rights reserved.
8  *
9  *
10  * This code is derived from software contributed to Berkeley by
11  * The Mach Operating System project at Carnegie-Mellon University.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  * 3. All advertising materials mentioning features or use of this software
22  *    must display the following acknowledgement:
23  *      This product includes software developed by the University of
24  *      California, Berkeley and its contributors.
25  * 4. Neither the name of the University nor the names of its contributors
26  *    may be used to endorse or promote products derived from this software
27  *    without specific prior written permission.
28  *
29  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
30  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
33  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
35  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
36  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
38  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39  * SUCH DAMAGE.
40  *
41  *      from: @(#)vm_fault.c    8.4 (Berkeley) 1/12/94
42  *
43  *
44  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
45  * All rights reserved.
46  *
47  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
48  *
49  * Permission to use, copy, modify and distribute this software and
50  * its documentation is hereby granted, provided that both the copyright
51  * notice and this permission notice appear in all copies of the
52  * software, derivative works or modified versions, and any portions
53  * thereof, and that both notices appear in supporting documentation.
54  *
55  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
56  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
57  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
58  *
59  * Carnegie Mellon requests users of this software to return to
60  *
61  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
62  *  School of Computer Science
63  *  Carnegie Mellon University
64  *  Pittsburgh PA 15213-3890
65  *
66  * any improvements or extensions that they make and grant Carnegie the
67  * rights to redistribute these changes.
68  *
69  * $FreeBSD: src/sys/vm/vm_fault.c,v 1.108.2.8 2002/02/26 05:49:27 silby Exp $
70  * $DragonFly: src/sys/vm/vm_fault.c,v 1.44 2007/08/28 01:09:07 dillon Exp $
71  */
72
73 /*
74  *      Page fault handling module.
75  */
76
77 #include <sys/param.h>
78 #include <sys/systm.h>
79 #include <sys/kernel.h>
80 #include <sys/proc.h>
81 #include <sys/vnode.h>
82 #include <sys/resourcevar.h>
83 #include <sys/vmmeter.h>
84 #include <sys/vkernel.h>
85 #include <sys/sfbuf.h>
86 #include <sys/lock.h>
87
88 #include <vm/vm.h>
89 #include <vm/vm_param.h>
90 #include <vm/pmap.h>
91 #include <vm/vm_map.h>
92 #include <vm/vm_object.h>
93 #include <vm/vm_page.h>
94 #include <vm/vm_pageout.h>
95 #include <vm/vm_kern.h>
96 #include <vm/vm_pager.h>
97 #include <vm/vnode_pager.h>
98 #include <vm/vm_extern.h>
99
100 #include <sys/thread2.h>
101 #include <vm/vm_page2.h>
102
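/*
 * Cluster sizes, in pages, for read-behind/read-ahead when asking the
 * pager for the faulting page.  VM_FAULT_READ is the total cluster size
 * including the faulting page itself.
 */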
103 #define VM_FAULT_READ_AHEAD 8
104 #define VM_FAULT_READ_BEHIND 7
105 #define VM_FAULT_READ (VM_FAULT_READ_AHEAD+VM_FAULT_READ_BEHIND+1)
106
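/*
 * State shared by vm_fault() and its helpers for the duration of a single
 * fault: the map lookup results, the object/page currently being resolved,
 * and assorted bookkeeping flags.
 */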
107 struct faultstate {
108         vm_page_t m;
109         vm_object_t object;
110         vm_pindex_t pindex;
111         vm_prot_t prot;
112         vm_page_t first_m;
113         vm_object_t first_object;
114         vm_prot_t first_prot;
115         vm_map_t map;
116         vm_map_entry_t entry;
117         int lookup_still_valid;
118         int didlimit;
119         int hardfault;
120         int fault_flags;
121         int map_generation;
122         boolean_t wired;
123         struct vnode *vp;
124 };
125
126 static int vm_fault_object(struct faultstate *, vm_pindex_t, vm_prot_t);
127 static int vm_fault_vpagetable(struct faultstate *, vm_pindex_t *, vpte_t, int);
128 static int vm_fault_additional_pages (vm_page_t, int, int, vm_page_t *, int *);
129 static int vm_fault_ratelimit(struct vmspace *);
130
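/*
 * Unbusy fs->m, move it to the inactive queue, and forget about it.
 */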
131 static __inline void
132 release_page(struct faultstate *fs)
133 {
134         vm_page_wakeup(fs->m);
135         vm_page_deactivate(fs->m);
136         fs->m = NULL;
137 }
138
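/*
 * Release the map lookup (read) lock obtained by vm_map_lookup(), if it
 * is still held.
 */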
139 static __inline void
140 unlock_map(struct faultstate *fs)
141 {
142         if (fs->lookup_still_valid && fs->map) {
143                 vm_map_lookup_done(fs->map, fs->entry, 0);
144                 fs->lookup_still_valid = FALSE;
145         }
146 }
147
148 /*
149  * Clean up after a successful call to vm_fault_object() so another call
150  * to vm_fault_object() can be made.
151  */
152 static void
153 _cleanup_successful_fault(struct faultstate *fs, int relock)
154 {
155         if (fs->object != fs->first_object) {
156                 vm_page_free(fs->first_m);
157                 vm_object_pip_wakeup(fs->object);
158                 fs->first_m = NULL;
159         }
160         fs->object = fs->first_object;
161         if (relock && fs->lookup_still_valid == FALSE) {
162                 if (fs->map)
163                         vm_map_lock_read(fs->map);
164                 fs->lookup_still_valid = TRUE;
165         }
166 }
167
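/*
 * Release everything tracked in the faultstate: drop the PIP count on
 * first_object, collapse back to first_object, release the map lookup
 * and any vnode lock, and optionally drop the reference on first_object.
 */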
168 static void
169 _unlock_things(struct faultstate *fs, int dealloc)
170 {
171         vm_object_pip_wakeup(fs->first_object);
172         _cleanup_successful_fault(fs, 0);
173         if (dealloc) {
174                 vm_object_deallocate(fs->first_object);
175         }
176         unlock_map(fs); 
177         if (fs->vp != NULL) { 
178                 vput(fs->vp);
179                 fs->vp = NULL;
180         }
181 }
182
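/*
 * Convenience wrappers: unlock_things() keeps the reference on
 * first_object, unlock_and_deallocate() also drops it, and
 * cleanup_successful_fault() re-acquires the map read lock if necessary.
 */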
183 #define unlock_things(fs) _unlock_things(fs, 0)
184 #define unlock_and_deallocate(fs) _unlock_things(fs, 1)
185 #define cleanup_successful_fault(fs) _cleanup_successful_fault(fs, 1)
186
187 /*
188  * TRYPAGER 
189  *
190  * Determine if the pager for the current object *might* contain the page.
191  *
192  * We only need to try the pager if this is not a default object (default
193  * objects are zero-fill and have no real pager), and if we are not taking
194  * a wiring fault or if the FS entry is wired.
195  */
196 #define TRYPAGER(fs)    \
197                 (fs->object->type != OBJT_DEFAULT && \
198                 (((fs->fault_flags & VM_FAULT_WIRE_MASK) == 0) || fs->wired))
199
200 /*
201  * vm_fault:
202  *
203  * Handle a page fault occurring at the given address, requiring the given
204  * permissions, in the map specified.  If successful, the page is inserted
205  * into the associated physical map.
206  *
207  * NOTE: The given address should be truncated to the proper page address.
208  *
209  * KERN_SUCCESS is returned if the page fault is handled; otherwise,
210  * a standard error specifying why the fault is fatal is returned.
211  *
212  * The map in question must be referenced, and remains so.
213  * The caller may hold no locks.
214  */
215 int
216 vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type, int fault_flags)
217 {
218         int result;
219         vm_pindex_t first_pindex;
220         struct faultstate fs;
221
222         mycpu->gd_cnt.v_vm_faults++;
223
224         fs.didlimit = 0;
225         fs.hardfault = 0;
226         fs.fault_flags = fault_flags;
227
228 RetryFault:
229         /*
230          * Find the vm_map_entry representing the backing store and resolve
231          * the top level object and page index.  This may have the side
232          * effect of executing a copy-on-write on the map entry and/or
233          * creating a shadow object, but will not COW any actual VM pages.
234          *
235          * On success fs.map is left read-locked and various other fields 
236          * are initialized but not otherwise referenced or locked.
237          *
238          * NOTE!  vm_map_lookup will try to upgrade the fault_type to
239          * VM_FAULT_WRITE if the map entry is a virtual page table and also
240          * writable, so we can set the 'A'ccessed bit in the virtual page
241          * table entry.
242          */
243         fs.map = map;
244         result = vm_map_lookup(&fs.map, vaddr, fault_type,
245                                &fs.entry, &fs.first_object,
246                                &first_pindex, &fs.first_prot, &fs.wired);
247
248         /*
249          * If the lookup failed or the map protections are incompatible,
250          * the fault generally fails.  However, if the caller is trying
251          * to do a user wiring we have more work to do.
252          */
253         if (result != KERN_SUCCESS) {
254                 if (result != KERN_PROTECTION_FAILURE)
255                         return result;
256                 if ((fs.fault_flags & VM_FAULT_WIRE_MASK) != VM_FAULT_USER_WIRE)
257                         return result;
258
259                 /*
260                  * If we are user-wiring a r/w segment, and it is COW, then
261                  * we need to do the COW operation.  Note that we don't
262          * currently COW read-only sections, because it is NOT desirable
263                  * to COW .text.  We simply keep .text from ever being COW'ed
264                  * and take the heat that one cannot debug wired .text sections.
265                  */
266                 result = vm_map_lookup(&fs.map, vaddr,
267                                        VM_PROT_READ|VM_PROT_WRITE|
268                                         VM_PROT_OVERRIDE_WRITE,
269                                        &fs.entry, &fs.first_object,
270                                        &first_pindex, &fs.first_prot,
271                                        &fs.wired);
272                 if (result != KERN_SUCCESS)
273                         return result;
274
275                 /*
276                  * If we don't COW now, on a user wire, the user will never
277                  * be able to write to the mapping.  If we don't make this
278                  * restriction, the bookkeeping would be nearly impossible.
279                  */
280                 if ((fs.entry->protection & VM_PROT_WRITE) == 0)
281                         fs.entry->max_protection &= ~VM_PROT_WRITE;
282         }
283
284         /*
285          * fs.map is read-locked
286          *
287          * Misc checks.  Save the map generation number to detect races.
288          */
289         fs.map_generation = fs.map->timestamp;
290
291         if (fs.entry->eflags & MAP_ENTRY_NOFAULT) {
292                 panic("vm_fault: fault on nofault entry, addr: %lx",
293                     (u_long)vaddr);
294         }
295
296         /*
297          * A system map entry may return a NULL object.  No object means
298          * no pager means an unrecoverable kernel fault.
299          */
300         if (fs.first_object == NULL) {
301                 panic("vm_fault: unrecoverable fault at %p in entry %p",
302                         (void *)vaddr, fs.entry);
303         }
304
305         /*
306          * Make a reference to this object to prevent its disposal while we
307          * are messing with it.  Once we have the reference, the map is free
308          * to be diddled.  Since objects reference their shadows (and copies),
309          * they will stay around as well.
310          *
311          * Bump the paging-in-progress count to prevent size changes (e.g.
312          * truncation operations) during I/O.  This must be done after
313          * obtaining the vnode lock in order to avoid possible deadlocks.
314          */
315         vm_object_reference(fs.first_object);
316         fs.vp = vnode_pager_lock(fs.first_object);
317         vm_object_pip_add(fs.first_object, 1);
318
319         fs.lookup_still_valid = TRUE;
320         fs.first_m = NULL;
321         fs.object = fs.first_object;    /* so unlock_and_deallocate works */
322
323         /*
324          * If the entry is wired we cannot change the page protection.
325          */
326         if (fs.wired)
327                 fault_type = fs.first_prot;
328
329         /*
330          * The page we want is at (first_object, first_pindex), but if the
331          * vm_map_entry is VM_MAPTYPE_VPAGETABLE we have to traverse the
332          * page table to figure out the actual pindex.
333          *
334          * NOTE!  DEVELOPMENT IN PROGRESS, THIS IS AN INITIAL IMPLEMENTATION
335          * ONLY
336          */
337         if (fs.entry->maptype == VM_MAPTYPE_VPAGETABLE) {
338                 result = vm_fault_vpagetable(&fs, &first_pindex,
339                                              fs.entry->aux.master_pde,
340                                              fault_type);
341                 if (result == KERN_TRY_AGAIN)
342                         goto RetryFault;
343                 if (result != KERN_SUCCESS)
344                         return (result);
345         }
346
347         /*
348          * Now we have the actual (object, pindex), fault in the page.  If
349          * vm_fault_object() fails it will unlock and deallocate the FS
350          * data.   If it succeeds everything remains locked and fs->object
351          * will have an additional PIP count if it is not equal to
352          * fs->first_object.
353          *
354          * vm_fault_object will set fs->prot for the pmap operation.  It is
355          * allowed to set VM_PROT_WRITE if fault_type == VM_PROT_READ and the
356          * page can be safely written.  However, it will force a read-only
357          * mapping for a read fault if the memory is managed by a virtual
358          * page table.
359          */
360         result = vm_fault_object(&fs, first_pindex, fault_type);
361
362         if (result == KERN_TRY_AGAIN)
363                 goto RetryFault;
364         if (result != KERN_SUCCESS)
365                 return (result);
366
367         /*
368          * On success vm_fault_object() does not unlock or deallocate, and fs.m
369          * will contain a busied page.
370          *
371          * Enter the page into the pmap and do pmap-related adjustments.
372          */
373         unlock_things(&fs);
374         pmap_enter(fs.map->pmap, vaddr, fs.m, fs.prot, fs.wired);
375
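        /*
         * For ordinary (non-wiring) faults on unwired entries,
         * opportunistically pre-map nearby resident pages to reduce
         * future soft faults.
         */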
376         if (((fs.fault_flags & VM_FAULT_WIRE_MASK) == 0) && (fs.wired == 0)) {
377                 pmap_prefault(fs.map->pmap, vaddr, fs.entry);
378         }
379
380         vm_page_flag_clear(fs.m, PG_ZERO);
381         vm_page_flag_set(fs.m, PG_MAPPED|PG_REFERENCED);
382
383         /*
384          * If the page is not wired down, then put it where the pageout daemon
385          * can find it.
386          */
387         if (fs.fault_flags & VM_FAULT_WIRE_MASK) {
388                 if (fs.wired)
389                         vm_page_wire(fs.m);
390                 else
391                         vm_page_unwire(fs.m, 1);
392         } else {
393                 vm_page_activate(fs.m);
394         }
395
396         if (curthread->td_lwp) {
397                 if (fs.hardfault) {
398                         curthread->td_lwp->lwp_ru.ru_majflt++;
399                 } else {
400                         curthread->td_lwp->lwp_ru.ru_minflt++;
401                 }
402         }
403
404         /*
405          * Unlock everything, and return
406          */
407         vm_page_wakeup(fs.m);
408         vm_object_deallocate(fs.first_object);
409
410         return (KERN_SUCCESS);
411 }
412
413 /*
414  * Fault in the specified virtual address in the current process map, 
415  * returning a held VM page or NULL.  See vm_fault_page() for more 
416  * information.
417  */
418 vm_page_t
419 vm_fault_page_quick(vm_offset_t va, vm_prot_t fault_type, int *errorp)
420 {
421         struct lwp *lp = curthread->td_lwp;
422         vm_page_t m;
423
424         m = vm_fault_page(&lp->lwp_vmspace->vm_map, va, 
425                           fault_type, VM_FAULT_NORMAL, errorp);
426         return(m);
427 }
428
429 /*
430  * Fault in the specified virtual address in the specified map, doing all
431  * necessary manipulation of the object store and all necessary I/O.  Return
432  * a held VM page or NULL, and set *errorp.  The related pmap is not
433  * updated.
434  *
435  * The returned page will be properly dirtied if VM_PROT_WRITE was specified,
436  * and marked PG_REFERENCED as well.
437  */
438 vm_page_t
439 vm_fault_page(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
440               int fault_flags, int *errorp)
441 {
442         int result;
443         vm_pindex_t first_pindex;
444         struct faultstate fs;
445
446         mycpu->gd_cnt.v_vm_faults++;
447
448         fs.didlimit = 0;
449         fs.hardfault = 0;
450         fs.fault_flags = fault_flags;
451         KKASSERT((fault_flags & VM_FAULT_WIRE_MASK) == 0);
452
453 RetryFault:
454         /*
455          * Find the vm_map_entry representing the backing store and resolve
456          * the top level object and page index.  This may have the side
457          * effect of executing a copy-on-write on the map entry and/or
458          * creating a shadow object, but will not COW any actual VM pages.
459          *
460          * On success fs.map is left read-locked and various other fields 
461          * are initialized but not otherwise referenced or locked.
462          *
463          * NOTE!  vm_map_lookup will upgrade the fault_type to VM_FAULT_WRITE
464          * if the map entry is a virtual page table and also writable,
465          * so we can set the 'A'ccessed bit in the virtual page table entry.
466          */
467         fs.map = map;
468         result = vm_map_lookup(&fs.map, vaddr, fault_type,
469                                &fs.entry, &fs.first_object,
470                                &first_pindex, &fs.first_prot, &fs.wired);
471
472         if (result != KERN_SUCCESS) {
473                 *errorp = result;
474                 return (NULL);
475         }
476
477         /*
478          * fs.map is read-locked
479          *
480          * Misc checks.  Save the map generation number to detect races.
481          */
482         fs.map_generation = fs.map->timestamp;
483
484         if (fs.entry->eflags & MAP_ENTRY_NOFAULT) {
485                 panic("vm_fault: fault on nofault entry, addr: %lx",
486                     (u_long)vaddr);
487         }
488
489         /*
490          * A system map entry may return a NULL object.  No object means
491          * no pager means an unrecoverable kernel fault.
492          */
493         if (fs.first_object == NULL) {
494                 panic("vm_fault: unrecoverable fault at %p in entry %p",
495                         (void *)vaddr, fs.entry);
496         }
497
498         /*
499          * Make a reference to this object to prevent its disposal while we
500          * are messing with it.  Once we have the reference, the map is free
501          * to be diddled.  Since objects reference their shadows (and copies),
502          * they will stay around as well.
503          *
504          * Bump the paging-in-progress count to prevent size changes (e.g.
505          * truncation operations) during I/O.  This must be done after
506          * obtaining the vnode lock in order to avoid possible deadlocks.
507          */
508         vm_object_reference(fs.first_object);
509         fs.vp = vnode_pager_lock(fs.first_object);
510         vm_object_pip_add(fs.first_object, 1);
511
512         fs.lookup_still_valid = TRUE;
513         fs.first_m = NULL;
514         fs.object = fs.first_object;    /* so unlock_and_deallocate works */
515
516         /*
517          * If the entry is wired we cannot change the page protection.
518          */
519         if (fs.wired)
520                 fault_type = fs.first_prot;
521
522         /*
523          * The page we want is at (first_object, first_pindex), but if the
524          * vm_map_entry is VM_MAPTYPE_VPAGETABLE we have to traverse the
525          * page table to figure out the actual pindex.
526          *
527          * NOTE!  DEVELOPMENT IN PROGRESS, THIS IS AN INITIAL IMPLEMENTATION
528          * ONLY
529          */
530         if (fs.entry->maptype == VM_MAPTYPE_VPAGETABLE) {
531                 result = vm_fault_vpagetable(&fs, &first_pindex,
532                                              fs.entry->aux.master_pde,
533                                              fault_type);
534                 if (result == KERN_TRY_AGAIN)
535                         goto RetryFault;
536                 if (result != KERN_SUCCESS) {
537                         *errorp = result;
538                         return (NULL);
539                 }
540         }
541
542         /*
543          * Now we have the actual (object, pindex), fault in the page.  If
544          * vm_fault_object() fails it will unlock and deallocate the FS
545          * data.   If it succeeds everything remains locked and fs->object
546          * will have an additional PIP count if it is not equal to
547          * fs->first_object.
548          */
549         result = vm_fault_object(&fs, first_pindex, fault_type);
550
551         if (result == KERN_TRY_AGAIN)
552                 goto RetryFault;
553         if (result != KERN_SUCCESS) {
554                 *errorp = result;
555                 return(NULL);
556         }
557
558         /*
559          * On success vm_fault_object() does not unlock or deallocate, and fs.m
560          * will contain a busied page.
561          */
562         unlock_things(&fs);
563
564         /*
565          * Return a held page.  We are not doing any pmap manipulation so do
566          * not set PG_MAPPED.  However, adjust the page flags according to
567          * the fault type because the caller may not use a managed pmapping
568          * (so we don't want to lose the fact that the page will be dirtied
569          * if a write fault was specified).
570          */
571         vm_page_hold(fs.m);
572         vm_page_flag_clear(fs.m, PG_ZERO);
573         if (fault_type & VM_PROT_WRITE)
574                 vm_page_dirty(fs.m);
575
576         /*
577          * Update the pmap.  We really only have to do this if a COW
578          * occurred to replace the read-only page with the new page.  For
579          * now just do it unconditionally. XXX
580          */
581         pmap_enter(fs.map->pmap, vaddr, fs.m, fs.prot, fs.wired);
582         vm_page_flag_set(fs.m, PG_REFERENCED|PG_MAPPED);
583
584         /*
585          * Unbusy the page by activating it.  It remains held and will not
586          * be reclaimed.
587          */
588         vm_page_activate(fs.m);
589
590         if (curthread->td_lwp) {
591                 if (fs.hardfault) {
592                         curthread->td_lwp->lwp_ru.ru_majflt++;
593                 } else {
594                         curthread->td_lwp->lwp_ru.ru_minflt++;
595                 }
596         }
597
598         /*
599          * Unlock everything, and return the held page.
600          */
601         vm_page_wakeup(fs.m);
602         vm_object_deallocate(fs.first_object);
603
604         *errorp = 0;
605         return(fs.m);
606 }
607
608 /*
609  * Fault in the page at the specified offset in the given VM object, returning a held VM page or NULL and setting *errorp.  The pmap is not updated.
610  */
611 vm_page_t
612 vm_fault_object_page(vm_object_t object, vm_ooffset_t offset,
613                      vm_prot_t fault_type, int fault_flags, int *errorp)
614 {
615         int result;
616         vm_pindex_t first_pindex;
617         struct faultstate fs;
618         struct vm_map_entry entry;
619
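        /*
         * There is no map backing this fault, so fabricate a minimal
         * vm_map_entry on the stack for the faultstate helpers to consult.
         */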
620         bzero(&entry, sizeof(entry));
621         entry.object.vm_object = object;
622         entry.maptype = VM_MAPTYPE_NORMAL;
623         entry.protection = entry.max_protection = fault_type;
624
625         fs.didlimit = 0;
626         fs.hardfault = 0;
627         fs.fault_flags = fault_flags;
628         fs.map = NULL;
629         KKASSERT((fault_flags & VM_FAULT_WIRE_MASK) == 0);
630
631 RetryFault:
632         
633         fs.first_object = object;
634         first_pindex = OFF_TO_IDX(offset);
635         fs.entry = &entry;
636         fs.first_prot = fault_type;
637         fs.wired = 0;
638         /*fs.map_generation = 0; unused */
639
640         /*
641          * Make a reference to this object to prevent its disposal while we
642          * are messing with it.  Once we have the reference, the map is free
643          * to be diddled.  Since objects reference their shadows (and copies),
644          * they will stay around as well.
645          *
646          * Bump the paging-in-progress count to prevent size changes (e.g.
647          * truncation operations) during I/O.  This must be done after
648          * obtaining the vnode lock in order to avoid possible deadlocks.
649          */
650         vm_object_reference(fs.first_object);
651         fs.vp = vnode_pager_lock(fs.first_object);
652         vm_object_pip_add(fs.first_object, 1);
653
654         fs.lookup_still_valid = TRUE;
655         fs.first_m = NULL;
656         fs.object = fs.first_object;    /* so unlock_and_deallocate works */
657
658 #if 0
659         /* XXX future - ability to operate on VM object using vpagetable */
660         if (fs.entry->maptype == VM_MAPTYPE_VPAGETABLE) {
661                 result = vm_fault_vpagetable(&fs, &first_pindex,
662                                              fs.entry->aux.master_pde,
663                                              fault_type);
664                 if (result == KERN_TRY_AGAIN)
665                         goto RetryFault;
666                 if (result != KERN_SUCCESS) {
667                         *errorp = result;
668                         return (NULL);
669                 }
670         }
671 #endif
672
673         /*
674          * Now we have the actual (object, pindex), fault in the page.  If
675          * vm_fault_object() fails it will unlock and deallocate the FS
676          * data.   If it succeeds everything remains locked and fs->object
677          * will have an additional PIP count if it is not equal to
678          * fs->first_object.
679          */
680         result = vm_fault_object(&fs, first_pindex, fault_type);
681
682         if (result == KERN_TRY_AGAIN)
683                 goto RetryFault;
684         if (result != KERN_SUCCESS) {
685                 *errorp = result;
686                 return(NULL);
687         }
688
689         /*
690          * On success vm_fault_object() does not unlock or deallocate, and fs.m
691          * will contain a busied page.
692          */
693         unlock_things(&fs);
694
695         /*
696          * Return a held page.  We are not doing any pmap manipulation so do
697          * not set PG_MAPPED.  However, adjust the page flags according to
698          * the fault type because the caller may not use a managed pmapping
699          * (so we don't want to lose the fact that the page will be dirtied
700          * if a write fault was specified).
701          */
702         vm_page_hold(fs.m);
703         vm_page_flag_clear(fs.m, PG_ZERO);
704         if (fault_type & VM_PROT_WRITE)
705                 vm_page_dirty(fs.m);
706
707         /*
708          * Indicate that the page was accessed.
709          */
710         vm_page_flag_set(fs.m, PG_REFERENCED);
711
712         /*
713          * Unbusy the page by activating it.  It remains held and will not
714          * be reclaimed.
715          */
716         vm_page_activate(fs.m);
717
718         if (curthread->td_lwp) {
719                 if (fs.hardfault) {
720                         mycpu->gd_cnt.v_vm_faults++;
721                         curthread->td_lwp->lwp_ru.ru_majflt++;
722                 } else {
723                         curthread->td_lwp->lwp_ru.ru_minflt++;
724                 }
725         }
726
727         /*
728          * Unlock everything, and return the held page.
729          */
730         vm_page_wakeup(fs.m);
731         vm_object_deallocate(fs.first_object);
732
733         *errorp = 0;
734         return(fs.m);
735 }
736
737 /*
738  * Translate the virtual page number (first_pindex) that is relative
739  * to the address space into a logical page number that is relative to the
740  * backing object.  Use the virtual page table pointed to by (vpte).
741  *
742  * This implements an N-level page table.  Any level can terminate the
743  * scan by setting VPTE_PS.   A linear mapping is accomplished by setting
744  * VPTE_PS in the master page directory entry set via mcontrol(MADV_SETMAP).
745  */
746 static
747 int
748 vm_fault_vpagetable(struct faultstate *fs, vm_pindex_t *pindex,
749                     vpte_t vpte, int fault_type)
750 {
751         struct sf_buf *sf;
752         int vshift = 32 - PAGE_SHIFT;   /* page index bits remaining */
753         int result = KERN_SUCCESS;
754         vpte_t *ptep;
755
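        /*
         * Walk the virtual page table one level per iteration until a
         * terminal (VPTE_PS) entry is reached or the index bits are
         * exhausted.
         */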
756         for (;;) {
757                 /*
758                  * We cannot proceed if the vpte is not valid, not readable
759                  * for a read fault, or not writable for a write fault.
760                  */
761                 if ((vpte & VPTE_V) == 0) {
762                         unlock_and_deallocate(fs);
763                         return (KERN_FAILURE);
764                 }
765                 if ((fault_type & VM_PROT_READ) && (vpte & VPTE_R) == 0) {
766                         unlock_and_deallocate(fs);
767                         return (KERN_FAILURE);
768                 }
769                 if ((fault_type & VM_PROT_WRITE) && (vpte & VPTE_W) == 0) {
770                         unlock_and_deallocate(fs);
771                         return (KERN_FAILURE);
772                 }
773                 if ((vpte & VPTE_PS) || vshift == 0)
774                         break;
775                 KKASSERT(vshift >= VPTE_PAGE_BITS);
776
777                 /*
778                  * Get the page table page.  Nominally we only read the page
779                  * table, but since we are actively setting VPTE_M and VPTE_A,
780                  * tell vm_fault_object() that we are writing it. 
781                  *
782                  * There is currently no real need to optimize this.
783                  */
784                 result = vm_fault_object(fs, vpte >> PAGE_SHIFT,
785                                          VM_PROT_READ|VM_PROT_WRITE);
786                 if (result != KERN_SUCCESS)
787                         return (result);
788
789                 /*
790                  * Process the returned fs.m and look up the page table
791                  * entry in the page table page.
792                  */
793                 vshift -= VPTE_PAGE_BITS;
794                 sf = sf_buf_alloc(fs->m, SFB_CPUPRIVATE);
795                 ptep = ((vpte_t *)sf_buf_kva(sf) +
796                         ((*pindex >> vshift) & VPTE_PAGE_MASK));
797                 vpte = *ptep;
798
799                 /*
800                  * Page table write-back.  If the vpte is valid for the
801                  * requested operation, do a write-back to the page table.
802                  *
803                  * XXX VPTE_M is not set properly for page directory pages.
804                  * It doesn't get set in the page directory if the page table
805                  * is modified during a read access.
806                  */
807                 if ((fault_type & VM_PROT_WRITE) && (vpte & VPTE_V) &&
808                     (vpte & VPTE_W)) {
809                         if ((vpte & (VPTE_M|VPTE_A)) != (VPTE_M|VPTE_A)) {
810                                 atomic_set_int(ptep, VPTE_M|VPTE_A);
811                                 vm_page_dirty(fs->m);
812                         }
813                 }
814                 if ((fault_type & VM_PROT_READ) && (vpte & VPTE_V) &&
815                     (vpte & VPTE_R)) {
816                         if ((vpte & VPTE_A) == 0) {
817                                 atomic_set_int(ptep, VPTE_A);
818                                 vm_page_dirty(fs->m);
819                         }
820                 }
821                 sf_buf_free(sf);
822                 vm_page_flag_set(fs->m, PG_REFERENCED);
823                 vm_page_activate(fs->m);
824                 vm_page_wakeup(fs->m);
825                 cleanup_successful_fault(fs);
826         }
827         /*
828          * Combine remaining address bits with the vpte.
829          */
830         *pindex = (vpte >> PAGE_SHIFT) +
831                   (*pindex & ((1 << vshift) - 1));
832         return (KERN_SUCCESS);
833 }
834
835
836 /*
837  * Do all operations required to fault-in (fs.first_object, pindex).  Run
838  * through the shadow chain as necessary and do required COW or virtual
839  * copy operations.  The caller has already fully resolved the vm_map_entry
840  * and, if appropriate, has created a copy-on-write layer.  All we need to
841  * do is iterate the object chain.
842  *
843  * On failure (fs) is unlocked and deallocated and the caller may return or
844  * retry depending on the failure code.  On success (fs) is NOT unlocked or
845  * deallocated, fs.m will contain a resolved, busied page, and fs.object
846  * will have an additional PIP count if it is not equal to fs.first_object.
847  */
848 static
849 int
850 vm_fault_object(struct faultstate *fs,
851                 vm_pindex_t first_pindex, vm_prot_t fault_type)
852 {
853         vm_object_t next_object;
854         vm_page_t marray[VM_FAULT_READ];
855         vm_pindex_t pindex;
856         int faultcount;
857
858         fs->prot = fs->first_prot;
859         fs->object = fs->first_object;
860         pindex = first_pindex;
861
862         /* 
863          * If a read fault occurs we try to make the page writable if
864          * possible.  There are three cases where we cannot make the
865          * page mapping writable:
866          *
867          * (1) The mapping is read-only or the VM object is read-only,
868          *     fs->prot above will simply not have VM_PROT_WRITE set.
869          *
870          * (2) If the mapping is a virtual page table we need to be able
871          *     to detect writes so we can set VPTE_M in the virtual page
872          *     table.
873          *
874          * (3) If the VM page is read-only or copy-on-write, upgrading would
875          *     just result in an unnecessary COW fault.
876          *
877          * VM_PROT_VPAGED is set if faulting via a virtual page table and
878          * causes adjustments to the 'M'odify bit to also turn off write
879          * access to force a re-fault.
880          */
881         if (fs->entry->maptype == VM_MAPTYPE_VPAGETABLE) {
882                 if ((fault_type & VM_PROT_WRITE) == 0)
883                         fs->prot &= ~VM_PROT_WRITE;
884         }
885
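        /*
         * Search the object chain starting at first_object.  Each pass
         * either finds the page resident, pages it in from the pager, or
         * drops down to the backing object.
         */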
886         for (;;) {
887                 /*
888                  * If the object is dead, we stop here
889                  */
890                 if (fs->object->flags & OBJ_DEAD) {
891                         unlock_and_deallocate(fs);
892                         return (KERN_PROTECTION_FAILURE);
893                 }
894
895                 /*
896                  * See if page is resident.  spl protection is required
897                  * to avoid an interrupt unbusy/free race against our
898                  * lookup.  We must hold the protection through a page
899                  * allocation or busy.
900                  */
901                 crit_enter();
902                 fs->m = vm_page_lookup(fs->object, pindex);
903                 if (fs->m != NULL) {
904                         int queue;
905                         /*
906                          * Wait/Retry if the page is busy.  We have to do this
907                          * if the page is busy via either PG_BUSY or 
908                          * vm_page_t->busy because the vm_pager may be using
909                          * vm_page_t->busy for pageouts ( and even pageins if
910                          * it is the vnode pager ), and we could end up trying
911                          * to pagein and pageout the same page simultaneously.
912                          *
913                          * We can theoretically allow the busy case on a read
914                          * fault if the page is marked valid, but since such
915                          * pages are typically already pmap'd, putting that
916                          * special case in might be more effort than it is
917                          * worth.  We cannot under any circumstances mess
918                          * around with a vm_page_t->busy page except, perhaps,
919                          * to pmap it.
920                          */
921                         if ((fs->m->flags & PG_BUSY) || fs->m->busy) {
922                                 unlock_things(fs);
923                                 vm_page_sleep_busy(fs->m, TRUE, "vmpfw");
924                                 mycpu->gd_cnt.v_intrans++;
925                                 vm_object_deallocate(fs->first_object);
926                                 crit_exit();
927                                 return (KERN_TRY_AGAIN);
928                         }
929
930                         /*
931                          * If reactivating a page from PQ_CACHE we may have
932                          * to rate-limit.
933                          */
934                         queue = fs->m->queue;
935                         vm_page_unqueue_nowakeup(fs->m);
936
937                         if ((queue - fs->m->pc) == PQ_CACHE && 
938                             vm_page_count_severe()) {
939                                 vm_page_activate(fs->m);
940                                 unlock_and_deallocate(fs);
941                                 vm_waitpfault();
942                                 crit_exit();
943                                 return (KERN_TRY_AGAIN);
944                         }
945
946                         /*
947                          * Mark page busy for other processes, and the 
948                          * pagedaemon.  If it still isn't completely valid
949                          * (readable), jump to readrest, else we found the
950                          * page and can return.
951                          *
952                          * We can release the spl once we have marked the
953                          * page busy.
954                          */
955                         vm_page_busy(fs->m);
956                         crit_exit();
957
958                         if (((fs->m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) &&
959                             fs->m->object != &kernel_object) {
960                                 goto readrest;
961                         }
962                         break; /* break to PAGE HAS BEEN FOUND */
963                 }
964
965                 /*
966                  * Page is not resident.  If this is the search termination
967                  * or the pager might contain the page, allocate a new page.
968                  *
969                  * NOTE: We are still in a critical section.
970                  */
971                 if (TRYPAGER(fs) || fs->object == fs->first_object) {
972                         /*
973                          * If the page is beyond the object size we fail
974                          */
975                         if (pindex >= fs->object->size) {
976                                 crit_exit();
977                                 unlock_and_deallocate(fs);
978                                 return (KERN_PROTECTION_FAILURE);
979                         }
980
981                         /*
982                          * Ratelimit.
983                          */
984                         if (fs->didlimit == 0 && curproc != NULL) {
985                                 int limticks;
986
987                                 limticks = vm_fault_ratelimit(curproc->p_vmspace);
988                                 if (limticks) {
989                                         crit_exit();
990                                         unlock_and_deallocate(fs);
991                                         tsleep(curproc, 0, "vmrate", limticks);
992                                         fs->didlimit = 1;
993                                         return (KERN_TRY_AGAIN);
994                                 }
995                         }
996
997                         /*
998                          * Allocate a new page for this object/offset pair.
999                          */
1000                         fs->m = NULL;
1001                         if (!vm_page_count_severe()) {
1002                                 fs->m = vm_page_alloc(fs->object, pindex,
1003                                     (fs->vp || fs->object->backing_object) ? VM_ALLOC_NORMAL : VM_ALLOC_NORMAL | VM_ALLOC_ZERO);
1004                         }
1005                         if (fs->m == NULL) {
1006                                 crit_exit();
1007                                 unlock_and_deallocate(fs);
1008                                 vm_waitpfault();
1009                                 return (KERN_TRY_AGAIN);
1010                         }
1011                 }
1012                 crit_exit();
1013
1014 readrest:
1015                 /*
1016                  * We have either found an existing page or allocated a new
1017                  * one.  The page thus may not be valid, or may be only
1018                  * partially valid.
1019                  *
1020                  * Attempt to fault-in the page if there is a chance that the
1021                  * pager has it, and potentially fault in additional pages
1022                  * at the same time.
1023                  *
1024                  * We are NOT in splvm here and if TRYPAGER is true then
1025                  * fs.m will be non-NULL and will be PG_BUSY for us.
1026                  */
1027
1028                 if (TRYPAGER(fs)) {
1029                         int rv;
1030                         int reqpage;
1031                         int ahead, behind;
1032                         u_char behavior = vm_map_entry_behavior(fs->entry);
1033
1034                         if (behavior == MAP_ENTRY_BEHAV_RANDOM) {
1035                                 ahead = 0;
1036                                 behind = 0;
1037                         } else {
1038                                 behind = pindex;
1039                                 if (behind > VM_FAULT_READ_BEHIND)
1040                                         behind = VM_FAULT_READ_BEHIND;
1041
1042                                 ahead = fs->object->size - pindex;
1043                                 if (ahead < 1)
1044                                         ahead = 1;
1045                                 if (ahead > VM_FAULT_READ_AHEAD)
1046                                         ahead = VM_FAULT_READ_AHEAD;
1047                         }
1048
1049                         if ((fs->first_object->type != OBJT_DEVICE) &&
1050                             (behavior == MAP_ENTRY_BEHAV_SEQUENTIAL ||
1051                                 (behavior != MAP_ENTRY_BEHAV_RANDOM &&
1052                                 pindex >= fs->entry->lastr &&
1053                                 pindex < fs->entry->lastr + VM_FAULT_READ))
1054                         ) {
1055                                 vm_pindex_t firstpindex, tmppindex;
1056
1057                                 if (first_pindex < 2 * VM_FAULT_READ)
1058                                         firstpindex = 0;
1059                                 else
1060                                         firstpindex = first_pindex - 2 * VM_FAULT_READ;
1061
1062                                 /*
1063                                  * note: partially valid pages cannot be 
1064                                  * included in the lookahead - NFS piecemeal
1065                                  * writes will barf on it badly.
1066                                  *
1067                                  * spl protection is required to avoid races
1068                                  * between the lookup and an interrupt
1069                                  * unbusy/free sequence occurring prior to
1070                                  * our busy check.
1071                                  */
1072                                 crit_enter();
1073                                 for (tmppindex = first_pindex - 1;
1074                                     tmppindex >= firstpindex;
1075                                     --tmppindex
1076                                 ) {
1077                                         vm_page_t mt;
1078
1079                                         mt = vm_page_lookup(fs->first_object, tmppindex);
1080                                         if (mt == NULL || (mt->valid != VM_PAGE_BITS_ALL))
1081                                                 break;
1082                                         if (mt->busy ||
1083                                                 (mt->flags & (PG_BUSY | PG_FICTITIOUS | PG_UNMANAGED)) ||
1084                                                 mt->hold_count ||
1085                                                 mt->wire_count) 
1086                                                 continue;
1087                                         if (mt->dirty == 0)
1088                                                 vm_page_test_dirty(mt);
1089                                         if (mt->dirty) {
1090                                                 vm_page_protect(mt, VM_PROT_NONE);
1091                                                 vm_page_deactivate(mt);
1092                                         } else {
1093                                                 vm_page_cache(mt);
1094                                         }
1095                                 }
1096                                 crit_exit();
1097
1098                                 ahead += behind;
1099                                 behind = 0;
1100                         }
1101
1102                         /*
1103                          * Now we find out if any other pages should be paged
1104                          * in at this time.  This routine checks to see if the
1105                          * pages surrounding this fault reside in the same
1106                          * object as the page for this fault.  If they do,
1107                          * then they are faulted in also into the object.  The
1108                          * array "marray" returned contains an array of
1109                          * vm_page_t structs where one of them is the
1110                          * vm_page_t passed to the routine.  The reqpage
1111                          * return value is the index into the marray for the
1112                          * vm_page_t passed to the routine.
1113                          *
1114                          * fs.m plus the additional pages are PG_BUSY'd.
1115                          */
1116                         faultcount = vm_fault_additional_pages(
1117                             fs->m, behind, ahead, marray, &reqpage);
1118
1119                         /*
1120                          * update lastr imperfectly (we do not know how much
1121                          * getpages will actually read), but good enough.
1122                          */
1123                         fs->entry->lastr = pindex + faultcount - behind;
1124
1125                         /*
1126                          * Call the pager to retrieve the data, if any, after
1127                          * releasing the lock on the map.  We hold a ref on
1128                          * fs.object and the pages are PG_BUSY'd.
1129                          */
1130                         unlock_map(fs);
1131
1132                         if (faultcount) {
1133                                 rv = vm_pager_get_pages(fs->object, marray, 
1134                                                         faultcount, reqpage);
1135                         } else {
1136                                 rv = VM_PAGER_FAIL;
1137                         }
1138
1139                         if (rv == VM_PAGER_OK) {
1140                                 /*
1141                                  * Found the page. Leave it busy while we play
1142                                  * with it.
1143                                  */
1144
1145                                 /*
1146                                  * Relookup in case pager changed page. Pager
1147                                  * is responsible for disposition of old page
1148                                  * if moved.
1149                                  *
1150                                  * XXX other code segments do relookups too.
1151                                  * It's a bad abstraction that needs to be
1152                                  * fixed/removed.
1153                                  */
1154                                 fs->m = vm_page_lookup(fs->object, pindex);
1155                                 if (fs->m == NULL) {
1156                                         unlock_and_deallocate(fs);
1157                                         return (KERN_TRY_AGAIN);
1158                                 }
1159
1160                                 ++fs->hardfault;
1161                                 break; /* break to PAGE HAS BEEN FOUND */
1162                         }
1163
1164                         /*
1165                          * Remove the bogus page (which does not exist at this
1166                          * object/offset); before doing so, we must get back
1167                          * our object lock to preserve our invariant.
1168                          *
1169                          * Also wake up any other process that may want to bring
1170                          * in this page.
1171                          *
1172                          * If this is the top-level object, we must leave the
1173                          * busy page to prevent another process from rushing
1174                          * past us, and inserting the page in that object at
1175                          * the same time that we are.
1176                          */
1177                         if (rv == VM_PAGER_ERROR) {
1178                                 if (curproc)
1179                                         kprintf("vm_fault: pager read error, pid %d (%s)\n", curproc->p_pid, curproc->p_comm);
1180                                 else
1181                                         kprintf("vm_fault: pager read error, thread %p (%s)\n", curthread, curthread->td_comm);
1182                         }
1183                         /*
1184                          * Data outside the range of the pager or an I/O error
1185                          *
1186                          * The page may have been wired during the pagein,
1187                          * e.g. by the buffer cache, and cannot simply be
1188                          * freed.  Call vnode_pager_freepage() to deal with it.
1189                          */
1190                         /*
1191                          * XXX - the check for kernel_map is a kludge to work
1192                          * around having the machine panic on a kernel space
1193                          * fault w/ I/O error.
1194                          */
1195                         if (((fs->map != &kernel_map) && (rv == VM_PAGER_ERROR)) ||
1196                                 (rv == VM_PAGER_BAD)) {
1197                                 vnode_pager_freepage(fs->m);
1198                                 fs->m = NULL;
1199                                 unlock_and_deallocate(fs);
1200                                 if (rv == VM_PAGER_ERROR)
1201                                         return (KERN_FAILURE);
1202                                 else
1203                                         return (KERN_PROTECTION_FAILURE);
1204                                 /* NOT REACHED */
1205                         }
1206                         if (fs->object != fs->first_object) {
1207                                 vnode_pager_freepage(fs->m);
1208                                 fs->m = NULL;
1209                                 /*
1210                                  * XXX - we cannot just fall out at this
1211                                  * point, m has been freed and is invalid!
1212                                  */
1213                         }
1214                 }
1215
1216                 /*
1217                  * We get here if the object has a default pager (or unwiring) 
1218                  * or the pager doesn't have the page.
1219                  */
1220                 if (fs->object == fs->first_object)
1221                         fs->first_m = fs->m;
1222
1223                 /*
1224                  * Move on to the next object.  Lock the next object before
1225                  * unlocking the current one.
1226                  */
1227                 pindex += OFF_TO_IDX(fs->object->backing_object_offset);
1228                 next_object = fs->object->backing_object;
1229                 if (next_object == NULL) {
1230                         /*
1231                          * If there's no object left, fill the page in the top
1232                          * object with zeros.
1233                          */
1234                         if (fs->object != fs->first_object) {
1235                                 vm_object_pip_wakeup(fs->object);
1236
1237                                 fs->object = fs->first_object;
1238                                 pindex = first_pindex;
1239                                 fs->m = fs->first_m;
1240                         }
1241                         fs->first_m = NULL;
1242
1243                         /*
1244                          * Zero the page if necessary and mark it valid.
1245                          */
1246                         if ((fs->m->flags & PG_ZERO) == 0) {
1247                                 vm_page_zero_fill(fs->m);
1248                         } else {
1249                                 mycpu->gd_cnt.v_ozfod++;
1250                         }
1251                         mycpu->gd_cnt.v_zfod++;
1252                         fs->m->valid = VM_PAGE_BITS_ALL;
1253                         break;  /* break to PAGE HAS BEEN FOUND */
1254                 } else {
1255                         if (fs->object != fs->first_object) {
1256                                 vm_object_pip_wakeup(fs->object);
1257                         }
1258                         KASSERT(fs->object != next_object, ("object loop %p", next_object));
1259                         fs->object = next_object;
1260                         vm_object_pip_add(fs->object, 1);
1261                 }
1262         }
1263
1264         KASSERT((fs->m->flags & PG_BUSY) != 0,
1265                 ("vm_fault: not busy after main loop"));
1266
1267         /*
1268          * PAGE HAS BEEN FOUND. [Loop invariant still holds -- the object lock
1269          * is held.]
1270          */
1271
1272         /*
1273          * If the page is being written, but isn't already owned by the
1274          * top-level object, we have to copy it into a new page owned by the
1275          * top-level object.
1276          */
1277         if (fs->object != fs->first_object) {
1278                 /*
1279                  * We only really need to copy if we want to write it.
1280                  */
1281                 if (fault_type & VM_PROT_WRITE) {
1282                         /*
1283                          * This allows pages to be virtually copied from a 
1284                          * backing_object into the first_object, where the 
1285                          * backing object has no other refs to it, and cannot
1286                          * gain any more refs.  Instead of a bcopy, we just 
1287                          * move the page from the backing object to the 
1288                          * first object.  Note that we must mark the page 
1289                          * dirty in the first object so that it will go out 
1290                          * to swap when needed.
1291                          */
1292                         if (
1293                                 /*
1294                                  * Map, if present, has not changed
1295                                  */
1296                                 (fs->map == NULL ||
1297                                 fs->map_generation == fs->map->timestamp) &&
1298                                 /*
1299                                  * Only one shadow object
1300                                  */
1301                                 (fs->object->shadow_count == 1) &&
1302                                 /*
1303                                  * No COW refs, except us
1304                                  */
1305                                 (fs->object->ref_count == 1) &&
1306                                 /*
1307                                  * No one else can look this object up
1308                                  */
1309                                 (fs->object->handle == NULL) &&
1310                                 /*
1311                                  * No other ways to look the object up
1312                                  */
1313                                 ((fs->object->type == OBJT_DEFAULT) ||
1314                                  (fs->object->type == OBJT_SWAP)) &&
1315                                 /*
1316                                  * We don't chase down the shadow chain
1317                                  */
1318                                 (fs->object == fs->first_object->backing_object) &&
1319
1320                                 /*
1321                                  * grab the lock if we need to
1322                                  */
1323                                 (fs->lookup_still_valid ||
1324                                  fs->map == NULL ||
1325                                  lockmgr(&fs->map->lock, LK_EXCLUSIVE|LK_NOWAIT) == 0)
1326                             ) {
1327                                 
1328                                 fs->lookup_still_valid = 1;
1329                                 /*
1330                                  * get rid of the unnecessary page
1331                                  */
1332                                 vm_page_protect(fs->first_m, VM_PROT_NONE);
1333                                 vm_page_free(fs->first_m);
1334                                 fs->first_m = NULL;
1335
1336                                 /*
1337                                  * grab the page and put it into the 
1338                                  * process'es object.  The page is 
1339                                  * process's object.  The page is
1340                                  */
1341                                 vm_page_rename(fs->m, fs->first_object, first_pindex);
1342                                 fs->first_m = fs->m;
1343                                 vm_page_busy(fs->first_m);
1344                                 fs->m = NULL;
1345                                 mycpu->gd_cnt.v_cow_optim++;
1346                         } else {
1347                                 /*
1348                                  * Oh, well, let's copy it.
1349                                  */
1350                                 vm_page_copy(fs->m, fs->first_m);
1351                         }
1352
1353                         if (fs->m) {
1354                                 /*
1355                                  * We no longer need the old page or object.
1356                                  */
1357                                 release_page(fs);
1358                         }
1359
1360                         /*
1361                          * fs->object != fs->first_object due to above 
1362                          * conditional
1363                          */
1364                         vm_object_pip_wakeup(fs->object);
1365
1366                         /*
1367                          * Only use the new page below...
1368                          */
1369
1370                         mycpu->gd_cnt.v_cow_faults++;
1371                         fs->m = fs->first_m;
1372                         fs->object = fs->first_object;
1373                         pindex = first_pindex;
1374                 } else {
1375                         /*
1376                          * If it wasn't a write fault avoid having to copy
1377                          * the page by mapping it read-only.
1378                          */
1379                         fs->prot &= ~VM_PROT_WRITE;
1380                 }
1381         }
1382
1383         /*
1384          * We may have had to unlock a map to do I/O.  If we did then
1385          * lookup_still_valid will be FALSE.  If the map generation count
1386          * also changed then all sorts of things could have happened while
1387          * we were doing the I/O and we need to retry.
1388          */
1389
1390         if (!fs->lookup_still_valid &&
1391             fs->map != NULL &&
1392             (fs->map->timestamp != fs->map_generation)) {
1393                 release_page(fs);
1394                 unlock_and_deallocate(fs);
1395                 return (KERN_TRY_AGAIN);
1396         }
1397
1398         /*
1399          * Put this page into the physical map. We had to do the unlock above
1400          * because pmap_enter may cause other faults.   We don't put the page
1401          * back on the active queue until later so that the page-out daemon
1402          * won't find us (yet).
1403          */
1404         if (fs->prot & VM_PROT_WRITE) {
1405                 vm_page_flag_set(fs->m, PG_WRITEABLE);
1406                 vm_object_set_writeable_dirty(fs->m->object);
1407
1408                 /*
1409                  * If the fault is a write, we know that this page is being
1410                  * written NOW so dirty it explicitly to save on 
1411                  * pmap_is_modified() calls later.
1412                  *
1413                  * If this is a NOSYNC mmap we do not want to set PG_NOSYNC
1414                  * if the page is already dirty to prevent data written with
1415                  * the expectation of being synced from not being synced.
1416                  * Likewise if this entry does not request NOSYNC then make
1417                  * sure the page isn't marked NOSYNC.  Applications sharing
1418                  * data should use the same flags to avoid ping-ponging.
1419                  *
1420                  * Also tell the backing pager, if any, that it should remove
1421                  * any swap backing since the page is now dirty.
1422                  */
1423                 if (fs->entry->eflags & MAP_ENTRY_NOSYNC) {
1424                         if (fs->m->dirty == 0)
1425                                 vm_page_flag_set(fs->m, PG_NOSYNC);
1426                 } else {
1427                         vm_page_flag_clear(fs->m, PG_NOSYNC);
1428                 }
1429                 if (fs->fault_flags & VM_FAULT_DIRTY) {
1430                         crit_enter();
1431                         vm_page_dirty(fs->m);
1432                         vm_pager_page_unswapped(fs->m);
1433                         crit_exit();
1434                 }
1435         }
1436
1437         /*
1438          * Page had better still be busy.  We are still locked up and 
1439          * fs->object will have another PIP reference if it is not equal
1440          * to fs->first_object.
1441          */
1442         KASSERT(fs->m->flags & PG_BUSY,
1443                 ("vm_fault: page %p not busy!", fs->m));
1444
1445         /*
1446          * Sanity check: page must be completely valid or it is not fit to
1447          * map into user space.  vm_pager_get_pages() ensures this.
1448          */
1449         if (fs->m->valid != VM_PAGE_BITS_ALL) {
1450                 vm_page_zero_invalid(fs->m, TRUE);
1451                 kprintf("Warning: page %p partially invalid on fault\n", fs->m);
1452         }
1453
1454         return (KERN_SUCCESS);
1455 }
1456
1457 /*
1458  * Wire down a range of virtual addresses in a map.  The entry in question
1459  * should be marked in-transition and the map must be locked.  We must
1460  * release the map temporarily while faulting-in the page to avoid a
1461  * deadlock.  Note that the entry may be clipped while we are blocked but
1462  * will never be freed.
1463  */
1464 int
1465 vm_fault_wire(vm_map_t map, vm_map_entry_t entry, boolean_t user_wire)
1466 {
1467         boolean_t fictitious;
1468         vm_offset_t start;
1469         vm_offset_t end;
1470         vm_offset_t va;
1471         vm_paddr_t pa;
1472         pmap_t pmap;
1473         int rv;
1474
1475         pmap = vm_map_pmap(map);
1476         start = entry->start;
1477         end = entry->end;
1478         fictitious = entry->object.vm_object &&
1479                         (entry->object.vm_object->type == OBJT_DEVICE);
1480
1481         vm_map_unlock(map);
1482         map->timestamp++;
1483
1484         /*
1485          * We simulate a fault to get the page and enter it in the physical
1486          * map.
1487          */
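             /*
              * (Presumably a kernel wiring asks for VM_PROT_READ|VM_PROT_WRITE
              * so that any copy-on-write is resolved before the page is wired,
              * while a user wire request only needs read access and relies on
              * VM_FAULT_USER_WIRE to enforce the entry's own protection.)
              */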
1488         for (va = start; va < end; va += PAGE_SIZE) {
1489                 if (user_wire) {
1490                         rv = vm_fault(map, va, VM_PROT_READ, 
1491                                         VM_FAULT_USER_WIRE);
1492                 } else {
1493                         rv = vm_fault(map, va, VM_PROT_READ|VM_PROT_WRITE,
1494                                         VM_FAULT_CHANGE_WIRING);
1495                 }
1496                 if (rv) {
1497                         while (va > start) {
1498                                 va -= PAGE_SIZE;
1499                                 if ((pa = pmap_extract(pmap, va)) == 0)
1500                                         continue;
1501                                 pmap_change_wiring(pmap, va, FALSE);
1502                                 if (!fictitious)
1503                                         vm_page_unwire(PHYS_TO_VM_PAGE(pa), 1);
1504                         }
1505                         vm_map_lock(map);
1506                         return (rv);
1507                 }
1508         }
1509         vm_map_lock(map);
1510         return (KERN_SUCCESS);
1511 }
1512
1513 /*
1514  * Unwire a range of virtual addresses in a map.  The map should be
1515  * locked.
1516  */
1517 void
1518 vm_fault_unwire(vm_map_t map, vm_map_entry_t entry)
1519 {
1520         boolean_t fictitious;
1521         vm_offset_t start;
1522         vm_offset_t end;
1523         vm_offset_t va;
1524         vm_paddr_t pa;
1525         pmap_t pmap;
1526
1527         pmap = vm_map_pmap(map);
1528         start = entry->start;
1529         end = entry->end;
1530         fictitious = entry->object.vm_object &&
1531                         (entry->object.vm_object->type == OBJT_DEVICE);
1532
1533         /*
1534          * Since the pages are wired down, we must be able to get their
1535          * mappings from the physical map system.
1536          */
1537         for (va = start; va < end; va += PAGE_SIZE) {
1538                 pa = pmap_extract(pmap, va);
1539                 if (pa != 0) {
1540                         pmap_change_wiring(pmap, va, FALSE);
1541                         if (!fictitious)
1542                                 vm_page_unwire(PHYS_TO_VM_PAGE(pa), 1);
1543                 }
1544         }
1545 }
1546
1547 /*
1548  * Reduce the rate at which memory is allocated to a process based
1549  * on the perceived load on the VM system. As the load increases
1550  * the allocation burst rate goes down and the delay increases. 
1551  *
1552  * Rate limiting does not apply when faulting active or inactive
1553  * pages.  When faulting 'cache' pages, rate limiting only applies
1554  * if the system currently has a severe page deficit.
1555  *
1556  * XXX vm_pagesupply should be increased when a page is freed.
1557  *
1558  * We sleep up to 1/10 of a second.
1559  */
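     /*
      * For example (assuming vm_load is scaled 0-1000, as the formulas below
      * imply): at vm_load 500 a process gets a burst of (1000 - 500) / 10 = 50
      * unthrottled faults, then each time the burst runs out it sleeps
      * 500 * hz / 10000 = hz / 20 ticks (about 50 milliseconds) and receives
      * another 50-fault burst.
      */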
1560 static int
1561 vm_fault_ratelimit(struct vmspace *vmspace)
1562 {
1563         if (vm_load_enable == 0)
1564                 return(0);
1565         if (vmspace->vm_pagesupply > 0) {
1566                 --vmspace->vm_pagesupply;
1567                 return(0);
1568         }
1569 #ifdef INVARIANTS
1570         if (vm_load_debug) {
1571                 kprintf("load %-4d give %d pgs, wait %d, pid %-5d (%s)\n",
1572                         vm_load, 
1573                         (1000 - vm_load) / 10, vm_load * hz / 10000,
1574                         curproc->p_pid, curproc->p_comm);
1575         }
1576 #endif
1577         vmspace->vm_pagesupply = (1000 - vm_load) / 10;
1578         return(vm_load * hz / 10000);
1579 }
1580
1581 /*
1582  *      Routine:
1583  *              vm_fault_copy_entry
1584  *      Function:
1585  *              Copy all of the pages from a wired-down map entry to another.
1586  *
1587  *      In/out conditions:
1588  *              The source and destination maps must be locked for write.
1589  *              The source map entry must be wired down (or be a sharing map
1590  *              entry corresponding to a main map entry that is wired down).
1591  */
1592
1593 void
1594 vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
1595     vm_map_entry_t dst_entry, vm_map_entry_t src_entry)
1596 {
1597         vm_object_t dst_object;
1598         vm_object_t src_object;
1599         vm_ooffset_t dst_offset;
1600         vm_ooffset_t src_offset;
1601         vm_prot_t prot;
1602         vm_offset_t vaddr;
1603         vm_page_t dst_m;
1604         vm_page_t src_m;
1605
1606 #ifdef  lint
1607         src_map++;
1608 #endif  /* lint */
1609
1610         src_object = src_entry->object.vm_object;
1611         src_offset = src_entry->offset;
1612
1613         /*
1614          * Create the top-level object for the destination entry. (Doesn't
1615          * actually shadow anything - we copy the pages directly.)
1616          */
1617         vm_map_entry_allocate_object(dst_entry);
1618         dst_object = dst_entry->object.vm_object;
1619
1620         prot = dst_entry->max_protection;
1621
1622         /*
1623          * Loop through all of the pages in the entry's range, copying each
1624          * one from the source object (it should be there) to the destination
1625          * object.
1626          */
1627         for (vaddr = dst_entry->start, dst_offset = 0;
1628             vaddr < dst_entry->end;
1629             vaddr += PAGE_SIZE, dst_offset += PAGE_SIZE) {
1630
1631                 /*
1632                  * Allocate a page in the destination object
1633                  */
1634                 do {
1635                         dst_m = vm_page_alloc(dst_object,
1636                                 OFF_TO_IDX(dst_offset), VM_ALLOC_NORMAL);
1637                         if (dst_m == NULL) {
1638                                 vm_wait();
1639                         }
1640                 } while (dst_m == NULL);
1641
1642                 /*
1643                  * Find the page in the source object, and copy it in.
1644                  * (Because the source is wired down, the page will be in
1645                  * memory.)
1646                  */
1647                 src_m = vm_page_lookup(src_object,
1648                         OFF_TO_IDX(dst_offset + src_offset));
1649                 if (src_m == NULL)
1650                         panic("vm_fault_copy_entry: page missing");
1651
1652                 vm_page_copy(src_m, dst_m);
1653
1654                 /*
1655                  * Enter it in the pmap...
1656                  */
1657
1658                 vm_page_flag_clear(dst_m, PG_ZERO);
1659                 pmap_enter(dst_map->pmap, vaddr, dst_m, prot, FALSE);
1660                 vm_page_flag_set(dst_m, PG_WRITEABLE|PG_MAPPED);
1661
1662                 /*
1663                  * Mark it no longer busy, and put it on the active list.
1664                  */
1665                 vm_page_activate(dst_m);
1666                 vm_page_wakeup(dst_m);
1667         }
1668 }
1669
1670
1671 /*
1672  * This routine checks around the requested page for other pages that
1673  * might be able to be faulted in.  This routine brackets the viable
1674  * pages for the pages to be paged in.
1675  *
1676  * Inputs:
1677  *      m, rbehind, rahead
1678  *
1679  * Outputs:
1680  *  marray (array of vm_page_t), reqpage (index of requested page)
1681  *
1682  * Return value:
1683  *  number of pages in marray
1684  */
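     /*
      * For example (assuming the pager reports enough backing store for the
      * neighborhood, the object is large enough, and none of the neighboring
      * pages are already resident): with rbehind = 3, rahead = 4 and a
      * requested pindex of 10, marray is filled with pages 7-14 and *reqpage
      * is set to 3.
      */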
1685 static int
1686 vm_fault_additional_pages(vm_page_t m, int rbehind, int rahead,
1687     vm_page_t *marray, int *reqpage)
1688 {
1689         int i,j;
1690         vm_object_t object;
1691         vm_pindex_t pindex, startpindex, endpindex, tpindex;
1692         vm_page_t rtm;
1693         int cbehind, cahead;
1694
1695         object = m->object;
1696         pindex = m->pindex;
1697
1698         /*
1699          * we don't fault-ahead for device pager
1700          */
1701         if (object->type == OBJT_DEVICE) {
1702                 *reqpage = 0;
1703                 marray[0] = m;
1704                 return 1;
1705         }
1706
1707         /*
1708          * if the requested page is not available, then give up now
1709          */
1710
1711         if (!vm_pager_has_page(object, pindex, &cbehind, &cahead)) {
1712                 return 0;
1713         }
1714
1715         if ((cbehind == 0) && (cahead == 0)) {
1716                 *reqpage = 0;
1717                 marray[0] = m;
1718                 return 1;
1719         }
1720
1721         if (rahead > cahead) {
1722                 rahead = cahead;
1723         }
1724
1725         if (rbehind > cbehind) {
1726                 rbehind = cbehind;
1727         }
1728
1729         /*
1730          * try to do any readahead that we might have free pages for.
1731          */
1732         if ((rahead + rbehind) >
1733                 ((vmstats.v_free_count + vmstats.v_cache_count) - vmstats.v_free_reserved)) {
1734                 pagedaemon_wakeup();
1735                 marray[0] = m;
1736                 *reqpage = 0;
1737                 return 1;
1738         }
1739
1740         /*
1741          * scan backward for the read-behind pages -- in memory
1742          *
1743          * Assume that if the page is not found an interrupt will not
1744          * create it.  Theoretically interrupts can only remove (busy)
1745          * pages, not create new associations.
1746          */
1747         if (pindex > 0) {
1748                 if (rbehind > pindex) {
1749                         rbehind = pindex;
1750                         startpindex = 0;
1751                 } else {
1752                         startpindex = pindex - rbehind;
1753                 }
1754
1755                 crit_enter();
1756                 for (tpindex = pindex - 1; tpindex >= startpindex; tpindex -= 1) {
1757                         if (vm_page_lookup(object, tpindex)) {
1758                                 startpindex = tpindex + 1;
1759                                 break;
1760                         }
1761                         if (tpindex == 0)
1762                                 break;
1763                 }
1764
1765                 for (i = 0, tpindex = startpindex; tpindex < pindex; i++, tpindex++) {
1766
1767                         rtm = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL);
1768                         if (rtm == NULL) {
1769                                 crit_exit();
1770                                 for (j = 0; j < i; j++) {
1771                                         vm_page_free(marray[j]);
1772                                 }
1773                                 marray[0] = m;
1774                                 *reqpage = 0;
1775                                 return 1;
1776                         }
1777
1778                         marray[i] = rtm;
1779                 }
1780                 crit_exit();
1781         } else {
1782                 startpindex = 0;
1783                 i = 0;
1784         }
1785
1786         marray[i] = m;
1787         /* index of the required page within marray */
1788         *reqpage = i;
1789
1790         tpindex = pindex + 1;
1791         i++;
1792
1793         /*
1794          * scan forward for the read-ahead pages
1795          */
1796         endpindex = tpindex + rahead;
1797         if (endpindex > object->size)
1798                 endpindex = object->size;
1799
1800         crit_enter();
1801         for (; tpindex < endpindex; i++, tpindex++) {
1802
1803                 if (vm_page_lookup(object, tpindex)) {
1804                         break;
1805                 }
1806
1807                 rtm = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL);
1808                 if (rtm == NULL) {
1809                         break;
1810                 }
1811
1812                 marray[i] = rtm;
1813         }
1814         crit_exit();
1815
1816         /* return number of pages in marray */
1817         return i;
1818 }