1 /*
2  * Copyright (c) 1991, 1993
3  *      The Regents of the University of California.  All rights reserved.
4  * Copyright (c) 1994 John S. Dyson
5  * All rights reserved.
6  * Copyright (c) 1994 David Greenman
7  * All rights reserved.
8  *
9  *
10  * This code is derived from software contributed to Berkeley by
11  * The Mach Operating System project at Carnegie-Mellon University.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  * 3. All advertising materials mentioning features or use of this software
22  *    must display the following acknowledgement:
23  *      This product includes software developed by the University of
24  *      California, Berkeley and its contributors.
25  * 4. Neither the name of the University nor the names of its contributors
26  *    may be used to endorse or promote products derived from this software
27  *    without specific prior written permission.
28  *
29  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
30  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
33  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
35  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
36  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
38  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39  * SUCH DAMAGE.
40  *
41  *      from: @(#)vm_fault.c    8.4 (Berkeley) 1/12/94
42  *
43  *
44  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
45  * All rights reserved.
46  *
47  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
48  *
49  * Permission to use, copy, modify and distribute this software and
50  * its documentation is hereby granted, provided that both the copyright
51  * notice and this permission notice appear in all copies of the
52  * software, derivative works or modified versions, and any portions
53  * thereof, and that both notices appear in supporting documentation.
54  *
55  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
56  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
57  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
58  *
59  * Carnegie Mellon requests users of this software to return to
60  *
61  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
62  *  School of Computer Science
63  *  Carnegie Mellon University
64  *  Pittsburgh PA 15213-3890
65  *
66  * any improvements or extensions that they make and grant Carnegie the
67  * rights to redistribute these changes.
68  *
69  * $FreeBSD: src/sys/vm/vm_fault.c,v 1.108.2.8 2002/02/26 05:49:27 silby Exp $
70  * $DragonFly: src/sys/vm/vm_fault.c,v 1.22 2006/04/23 03:08:04 dillon Exp $
71  */
72
73 /*
74  *      Page fault handling module.
75  */
76
77 #include <sys/param.h>
78 #include <sys/systm.h>
79 #include <sys/kernel.h>
80 #include <sys/proc.h>
81 #include <sys/vnode.h>
82 #include <sys/resourcevar.h>
83 #include <sys/vmmeter.h>
84
85 #include <vm/vm.h>
86 #include <vm/vm_param.h>
87 #include <sys/lock.h>
88 #include <vm/pmap.h>
89 #include <vm/vm_map.h>
90 #include <vm/vm_object.h>
91 #include <vm/vm_page.h>
92 #include <vm/vm_pageout.h>
93 #include <vm/vm_kern.h>
94 #include <vm/vm_pager.h>
95 #include <vm/vnode_pager.h>
96 #include <vm/vm_extern.h>
97
98 #include <sys/thread2.h>
99 #include <vm/vm_page2.h>
100
101 static int vm_fault_additional_pages (vm_page_t, int,
102                                           int, vm_page_t *, int *);
103 static int vm_fault_ratelimit(struct vmspace *vmspace);
104
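/*
 * Size of the read-behind/read-ahead window used when clustering pages
 * in from the pager (see vm_fault_additional_pages()).
 */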
105 #define VM_FAULT_READ_AHEAD 8
106 #define VM_FAULT_READ_BEHIND 7
107 #define VM_FAULT_READ (VM_FAULT_READ_AHEAD+VM_FAULT_READ_BEHIND+1)
108
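/*
 * The faultstate structure holds the state carried across the vm_fault()
 * retry loop: the page and object/pindex currently being examined, the
 * top-level object and page, the map lookup results, and the vnode lock,
 * so the unwind and retry paths can release everything they hold.
 */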
109 struct faultstate {
110         vm_page_t m;
111         vm_object_t object;
112         vm_pindex_t pindex;
113         vm_page_t first_m;
114         vm_object_t     first_object;
115         vm_pindex_t first_pindex;
116         vm_map_t map;
117         vm_map_entry_t entry;
118         int lookup_still_valid;
119         struct vnode *vp;
120 };
121
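/*
 * Release the current page: unbusy it, wake up any waiters, and move it
 * to the inactive queue so the pageout daemon can find it.
 */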
122 static __inline void
123 release_page(struct faultstate *fs)
124 {
125         vm_page_wakeup(fs->m);
126         vm_page_deactivate(fs->m);
127         fs->m = NULL;
128 }
129
130 static __inline void
131 unlock_map(struct faultstate *fs)
132 {
133         if (fs->lookup_still_valid) {
134                 vm_map_lookup_done(fs->map, fs->entry, 0);
135                 fs->lookup_still_valid = FALSE;
136         }
137 }
138
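/*
 * Unwind the state accumulated by vm_fault(): drop the paging-in-progress
 * counts, free the placeholder first_m if we descended into a backing
 * object, optionally drop our reference on the first object, and release
 * the map lookup and the vnode.  The unlock_things() and
 * unlock_and_deallocate() macros below cover the two common cases.
 */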
139 static void
140 _unlock_things(struct faultstate *fs, int dealloc)
141 {
142         vm_object_pip_wakeup(fs->object);
143         if (fs->object != fs->first_object) {
144                 vm_page_free(fs->first_m);
145                 vm_object_pip_wakeup(fs->first_object);
146                 fs->first_m = NULL;
147         }
148         if (dealloc) {
149                 vm_object_deallocate(fs->first_object);
150         }
151         unlock_map(fs); 
152         if (fs->vp != NULL) { 
153                 vput(fs->vp);
154                 fs->vp = NULL;
155         }
156 }
157
158 #define unlock_things(fs) _unlock_things(fs, 0)
159 #define unlock_and_deallocate(fs) _unlock_things(fs, 1)
160
161 /*
162  * TRYPAGER - used by vm_fault to calculate whether the pager for the
163  *            current object *might* contain the page.
164  *
165  *            default objects are zero-fill; there is no real pager.
166  */
167
168 #define TRYPAGER        (fs.object->type != OBJT_DEFAULT && \
169                         (((fault_flags & VM_FAULT_WIRE_MASK) == 0) || wired))
170
171 /*
172  *      vm_fault:
173  *
174  *      Handle a page fault occurring at the given address,
175  *      requiring the given permissions, in the map specified.
176  *      If successful, the page is inserted into the
177  *      associated physical map.
178  *
179  *      NOTE: the given address should be truncated to the
180  *      proper page address.
181  *
182  *      KERN_SUCCESS is returned if the page fault is handled; otherwise,
183  *      a standard error specifying why the fault is fatal is returned.
184  *
185  *
186  *      The map in question must be referenced, and remains so.
187  *      Caller may hold no locks.
188  */
189 int
190 vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type, int fault_flags)
191 {
192         vm_prot_t prot;
193         int result;
194         boolean_t wired;
195         int map_generation;
196         vm_object_t next_object;
197         vm_page_t marray[VM_FAULT_READ];
198         int hardfault;
199         int faultcount;
200         int limticks;
201         int didlimit = 0;
202         struct faultstate fs;
203
204         mycpu->gd_cnt.v_vm_faults++;
205         hardfault = 0;
206
207 RetryFault:
208         /*
209          * Find the backing store object and offset into it to begin the
210          * search.
211          */
212         fs.map = map;
213         if ((result = vm_map_lookup(&fs.map, vaddr,
214                 fault_type, &fs.entry, &fs.first_object,
215                 &fs.first_pindex, &prot, &wired)) != KERN_SUCCESS) {
216                 if ((result != KERN_PROTECTION_FAILURE) ||
217                         ((fault_flags & VM_FAULT_WIRE_MASK) != VM_FAULT_USER_WIRE)) {
218                         return result;
219                 }
220
221                 /*
222                  * If we are user-wiring a r/w segment, and it is COW, then
223                  * we need to do the COW operation.  Note that we don't
224                  * currently COW RO sections, because it is NOT desirable
225                  * to COW .text.  We simply keep .text from ever being COW'ed
226                  * and take the heat that one cannot debug wired .text sections.
227                  */
228                 result = vm_map_lookup(&fs.map, vaddr,
229                         VM_PROT_READ|VM_PROT_WRITE|VM_PROT_OVERRIDE_WRITE,
230                         &fs.entry, &fs.first_object, &fs.first_pindex, &prot, &wired);
231                 if (result != KERN_SUCCESS) {
232                         return result;
233                 }
234
235                 /*
236                  * If we don't COW now, on a user wire, the user will never
237                  * be able to write to the mapping.  If we don't make this
238                  * restriction, the bookkeeping would be nearly impossible.
239                  */
240                 if ((fs.entry->protection & VM_PROT_WRITE) == 0)
241                         fs.entry->max_protection &= ~VM_PROT_WRITE;
242         }
243
244         map_generation = fs.map->timestamp;
245
246         if (fs.entry->eflags & MAP_ENTRY_NOFAULT) {
247                 panic("vm_fault: fault on nofault entry, addr: %lx",
248                     (u_long)vaddr);
249         }
250
251         /*
252          * A system map entry may return a NULL object.  No object means
253          * no pager means an unrecoverable kernel fault.
254          */
255         if (fs.first_object == NULL) {
256                 panic("vm_fault: unrecoverable fault at %p in entry %p",
257                         (void *)vaddr, fs.entry);
258         }
259
260         /*
261          * Make a reference to this object to prevent its disposal while we
262          * are messing with it.  Once we have the reference, the map is free
263          * to be diddled.  Since objects reference their shadows (and copies),
264          * they will stay around as well.
265          *
266          * Bump the paging-in-progress count to prevent size changes (e.g.
267          * truncation operations) during I/O.  This must be done after
268          * obtaining the vnode lock in order to avoid possible deadlocks.
269          */
270         vm_object_reference(fs.first_object);
271         fs.vp = vnode_pager_lock(fs.first_object);
272         vm_object_pip_add(fs.first_object, 1);
273
274         if ((fault_type & VM_PROT_WRITE) &&
275                 (fs.first_object->type == OBJT_VNODE)) {
276                 vm_freeze_copyopts(fs.first_object,
277                         fs.first_pindex, fs.first_pindex + 1);
278         }
279
280         fs.lookup_still_valid = TRUE;
281
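        /*
         * If the map entry is wired we must fault it in with the full
         * protection of the mapping, since the page must remain valid
         * for all permitted access types while wired.
         */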
282         if (wired)
283                 fault_type = prot;
284
285         fs.first_m = NULL;
286
287         /*
288          * Search for the page at object/offset.
289          */
290
291         fs.object = fs.first_object;
292         fs.pindex = fs.first_pindex;
293
294         while (TRUE) {
295                 /*
296                  * If the object is dead, we stop here
297                  */
298
299                 if (fs.object->flags & OBJ_DEAD) {
300                         unlock_and_deallocate(&fs);
301                         return (KERN_PROTECTION_FAILURE);
302                 }
303
304                 /*
305                  * See if page is resident.  spl protection is required
306                  * to avoid an interrupt unbusy/free race against our
307                  * lookup.  We must hold the protection through a page
308                  * allocation or busy.
309                  */
310                 crit_enter();
311                 fs.m = vm_page_lookup(fs.object, fs.pindex);
312                 if (fs.m != NULL) {
313                         int queue;
314                         /*
315                          * Wait/Retry if the page is busy.  We have to do this
316                          * if the page is busy via either PG_BUSY or 
317                          * vm_page_t->busy because the vm_pager may be using
318                          * vm_page_t->busy for pageouts ( and even pageins if
319                          * it is the vnode pager ), and we could end up trying
320                          * to pagein and pageout the same page simultaneously.
321                          *
322                          * We can theoretically allow the busy case on a read
323                          * fault if the page is marked valid, but since such
324                          * pages are typically already pmap'd, putting that
325                          * special case in might be more effort than it is
326                          * worth.  We cannot under any circumstances mess
327                          * around with a vm_page_t->busy page except, perhaps,
328                          * to pmap it.
329                          */
330                         if ((fs.m->flags & PG_BUSY) || fs.m->busy) {
331                                 unlock_things(&fs);
332                                 vm_page_sleep_busy(fs.m, TRUE, "vmpfw");
333                                 mycpu->gd_cnt.v_intrans++;
334                                 vm_object_deallocate(fs.first_object);
335                                 crit_exit();
336                                 goto RetryFault;
337                         }
338
339                         queue = fs.m->queue;
340                         vm_page_unqueue_nowakeup(fs.m);
341
342                         if ((queue - fs.m->pc) == PQ_CACHE && vm_page_count_severe()) {
343                                 vm_page_activate(fs.m);
344                                 unlock_and_deallocate(&fs);
345                                 vm_waitpfault();
346                                 crit_exit();
347                                 goto RetryFault;
348                         }
349
350                         /*
351                          * Mark page busy for other processes, and the 
352                          * pagedaemon.  If it still isn't completely valid
353                          * (readable), jump to readrest, else break out ( we
354                          * found the page ).
355                          *
356                          * We can release the spl once we have marked the
357                          * page busy.
358                          */
359
360                         vm_page_busy(fs.m);
361                         crit_exit();
362
363                         if (((fs.m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) &&
364                                 fs.m->object != kernel_object && fs.m->object != kmem_object) {
365                                 goto readrest;
366                         }
367
368                         break;
369                 }
370
371                 /*
372                  * Page is not resident.  If this is the search termination
373                  * or the pager might contain the page, allocate a new page.
374                  *
375                  * note: we are still in splvm().
376                  */
377
378                 if (TRYPAGER || fs.object == fs.first_object) {
379                         if (fs.pindex >= fs.object->size) {
380                                 crit_exit();
381                                 unlock_and_deallocate(&fs);
382                                 return (KERN_PROTECTION_FAILURE);
383                         }
384
385                         /*
386                          * Ratelimit.
387                          */
388                         if (didlimit == 0) {
389                                 limticks = 
390                                         vm_fault_ratelimit(curproc->p_vmspace);
391                                 if (limticks) {
392                                         crit_exit();
393                                         unlock_and_deallocate(&fs);
394                                         tsleep(curproc, 0, "vmrate", limticks);
395                                         didlimit = 1;
396                                         goto RetryFault;
397                                 }
398                         }
399
400                         /*
401                          * Allocate a new page for this object/offset pair.
402                          */
403                         fs.m = NULL;
404                         if (!vm_page_count_severe()) {
405                                 fs.m = vm_page_alloc(fs.object, fs.pindex,
406                                     (fs.vp || fs.object->backing_object)? VM_ALLOC_NORMAL: VM_ALLOC_NORMAL | VM_ALLOC_ZERO);
407                         }
408                         if (fs.m == NULL) {
409                                 crit_exit();
410                                 unlock_and_deallocate(&fs);
411                                 vm_waitpfault();
412                                 goto RetryFault;
413                         }
414                 }
415                 crit_exit();
416
417 readrest:
418                 /*
419                  * We have either found an existing page or allocated a new
420                  * one.  Either way the page may not be valid, or may only
421                  * be partially valid.
422                  *
423                  * Attempt to fault-in the page if there is a chance that the
424                  * pager has it, and potentially fault in additional pages
425                  * at the same time.
426                  *
427                  * We are NOT in splvm here and if TRYPAGER is true then
428                  * fs.m will be non-NULL and will be PG_BUSY for us.
429                  */
430
431                 if (TRYPAGER) {
432                         int rv;
433                         int reqpage;
434                         int ahead, behind;
435                         u_char behavior = vm_map_entry_behavior(fs.entry);
436
437                         if (behavior == MAP_ENTRY_BEHAV_RANDOM) {
438                                 ahead = 0;
439                                 behind = 0;
440                         } else {
441                                 behind = (vaddr - fs.entry->start) >> PAGE_SHIFT;
442                                 if (behind > VM_FAULT_READ_BEHIND)
443                                         behind = VM_FAULT_READ_BEHIND;
444
445                                 ahead = ((fs.entry->end - vaddr) >> PAGE_SHIFT) - 1;
446                                 if (ahead > VM_FAULT_READ_AHEAD)
447                                         ahead = VM_FAULT_READ_AHEAD;
448                         }
449
450                         if ((fs.first_object->type != OBJT_DEVICE) &&
451                             (behavior == MAP_ENTRY_BEHAV_SEQUENTIAL ||
452                                 (behavior != MAP_ENTRY_BEHAV_RANDOM &&
453                                 fs.pindex >= fs.entry->lastr &&
454                                 fs.pindex < fs.entry->lastr + VM_FAULT_READ))
455                         ) {
456                                 vm_pindex_t firstpindex, tmppindex;
457
458                                 if (fs.first_pindex < 2 * VM_FAULT_READ)
459                                         firstpindex = 0;
460                                 else
461                                         firstpindex = fs.first_pindex - 2 * VM_FAULT_READ;
462
463                                 /*
464                                  * note: partially valid pages cannot be 
465                                  * included in the lookahead - NFS piecemeal
466                                  * writes will barf on it badly.
467                                  *
468                                  * spl protection is required to avoid races
469                                  * between the lookup and an interrupt
470                                  * unbusy/free sequence occurring prior to
471                                  * our busy check.
472                                  */
473                                 crit_enter();
474                                 for (tmppindex = fs.first_pindex - 1;
475                                     tmppindex >= firstpindex;
476                                     --tmppindex
477                                 ) {
478                                         vm_page_t mt;
479                                         mt = vm_page_lookup( fs.first_object, tmppindex);
480                                         if (mt == NULL || (mt->valid != VM_PAGE_BITS_ALL))
481                                                 break;
482                                         if (mt->busy ||
483                                                 (mt->flags & (PG_BUSY | PG_FICTITIOUS | PG_UNMANAGED)) ||
484                                                 mt->hold_count ||
485                                                 mt->wire_count) 
486                                                 continue;
487                                         if (mt->dirty == 0)
488                                                 vm_page_test_dirty(mt);
489                                         if (mt->dirty) {
490                                                 vm_page_protect(mt, VM_PROT_NONE);
491                                                 vm_page_deactivate(mt);
492                                         } else {
493                                                 vm_page_cache(mt);
494                                         }
495                                 }
496                                 crit_exit();
497
498                                 ahead += behind;
499                                 behind = 0;
500                         }
501
502                         /*
503                          * now we find out if any other pages should be paged
504                          * in at this time.  This routine checks to see if the
505                          * pages surrounding this fault reside in the same
506                          * object as the page for this fault.  If they do,
507                          * they are also faulted into the object.  The
508                          * array "marray" returned contains an array of
509                          * vm_page_t structs where one of them is the
510                          * vm_page_t passed to the routine.  The reqpage
511                          * return value is the index into the marray for the
512                          * vm_page_t passed to the routine.
513                          *
514                          * fs.m plus the additional pages are PG_BUSY'd.
515                          */
516                         faultcount = vm_fault_additional_pages(
517                             fs.m, behind, ahead, marray, &reqpage);
518
519                         /*
520                          * update lastr imperfectly (we do not know how much
521                          * getpages will actually read), but good enough.
522                          */
523                         fs.entry->lastr = fs.pindex + faultcount - behind;
524
525                         /*
526                          * Call the pager to retrieve the data, if any, after
527                          * releasing the lock on the map.  We hold a ref on
528                          * fs.object and the pages are PG_BUSY'd.
529                          */
530                         unlock_map(&fs);
531
532                         rv = faultcount ?
533                             vm_pager_get_pages(fs.object, marray, faultcount,
534                                 reqpage) : VM_PAGER_FAIL;
535
536                         if (rv == VM_PAGER_OK) {
537                                 /*
538                                  * Found the page. Leave it busy while we play
539                                  * with it.
540                                  */
541
542                                 /*
543                                  * Relookup in case pager changed page. Pager
544                                  * is responsible for disposition of old page
545                                  * if moved.
546                                  *
547                                  * XXX other code segments do relookups too.
548                                  * It's a bad abstraction that needs to be
549                                  * fixed/removed.
550                                  */
551                                 fs.m = vm_page_lookup(fs.object, fs.pindex);
552                                 if (fs.m == NULL) {
553                                         unlock_and_deallocate(&fs);
554                                         goto RetryFault;
555                                 }
556
557                                 hardfault++;
558                                 break; /* break to PAGE HAS BEEN FOUND */
559                         }
560                         /*
561                          * Remove the bogus page (which does not exist at this
562                          * object/offset); before doing so, we must get back
563                          * our object lock to preserve our invariant.
564                          *
565                          * Also wake up any other process that may want to bring
566                          * in this page.
567                          *
568                          * If this is the top-level object, we must leave the
569                          * busy page to prevent another process from rushing
570                          * past us, and inserting the page in that object at
571                          * the same time that we are.
572                          */
573
574                         if (rv == VM_PAGER_ERROR)
575                                 printf("vm_fault: pager read error, pid %d (%s)\n",
576                                     curproc->p_pid, curproc->p_comm);
577                         /*
578                          * Data outside the range of the pager or an I/O error
579                          */
580                         /*
581                          * XXX - the check for kernel_map is a kludge to work
582                          * around having the machine panic on a kernel space
583                          * fault w/ I/O error.
584                          */
585                         if (((fs.map != kernel_map) && (rv == VM_PAGER_ERROR)) ||
586                                 (rv == VM_PAGER_BAD)) {
587                                 vm_page_free(fs.m);
588                                 fs.m = NULL;
589                                 unlock_and_deallocate(&fs);
590                                 return ((rv == VM_PAGER_ERROR) ? KERN_FAILURE : KERN_PROTECTION_FAILURE);
591                         }
592                         if (fs.object != fs.first_object) {
593                                 vm_page_free(fs.m);
594                                 fs.m = NULL;
595                                 /*
596                                  * XXX - we cannot just fall out at this
597                                  * point, m has been freed and is invalid!
598                                  */
599                         }
600                 }
601
602                 /*
603                  * We get here if the object has a default pager (or unwiring)
604                  * or the pager doesn't have the page.
605                  */
606                 if (fs.object == fs.first_object)
607                         fs.first_m = fs.m;
608
609                 /*
610                  * Move on to the next object.  Lock the next object before
611                  * unlocking the current one.
612                  */
613
614                 fs.pindex += OFF_TO_IDX(fs.object->backing_object_offset);
615                 next_object = fs.object->backing_object;
616                 if (next_object == NULL) {
617                         /*
618                          * If there's no object left, fill the page in the top
619                          * object with zeros.
620                          */
621                         if (fs.object != fs.first_object) {
622                                 vm_object_pip_wakeup(fs.object);
623
624                                 fs.object = fs.first_object;
625                                 fs.pindex = fs.first_pindex;
626                                 fs.m = fs.first_m;
627                         }
628                         fs.first_m = NULL;
629
630                         /*
631                          * Zero the page if necessary and mark it valid.
632                          */
633                         if ((fs.m->flags & PG_ZERO) == 0) {
634                                 vm_page_zero_fill(fs.m);
635                         } else {
636                                 mycpu->gd_cnt.v_ozfod++;
637                         }
638                         mycpu->gd_cnt.v_zfod++;
639                         fs.m->valid = VM_PAGE_BITS_ALL;
640                         break;  /* break to PAGE HAS BEEN FOUND */
641                 } else {
642                         if (fs.object != fs.first_object) {
643                                 vm_object_pip_wakeup(fs.object);
644                         }
645                         KASSERT(fs.object != next_object, ("object loop %p", next_object));
646                         fs.object = next_object;
647                         vm_object_pip_add(fs.object, 1);
648                 }
649         }
650
651         KASSERT((fs.m->flags & PG_BUSY) != 0,
652             ("vm_fault: not busy after main loop"));
653
654         /*
655          * PAGE HAS BEEN FOUND. [Loop invariant still holds -- the object lock
656          * is held.]
657          */
658
659         /*
660          * If the page is being written, but isn't already owned by the
661          * top-level object, we have to copy it into a new page owned by the
662          * top-level object.
663          */
664
665         if (fs.object != fs.first_object) {
666                 /*
667                  * We only really need to copy if we want to write it.
668                  */
669
670                 if (fault_type & VM_PROT_WRITE) {
671                         /*
672                          * This allows pages to be virtually copied from a 
673                          * backing_object into the first_object, where the 
674                          * backing object has no other refs to it, and cannot
675                          * gain any more refs.  Instead of a bcopy, we just 
676                          * move the page from the backing object to the 
677                          * first object.  Note that we must mark the page 
678                          * dirty in the first object so that it will go out 
679                          * to swap when needed.
680                          */
681                         if (map_generation == fs.map->timestamp &&
682                                 /*
683                                  * Only one shadow object
684                                  */
685                                 (fs.object->shadow_count == 1) &&
686                                 /*
687                                  * No COW refs, except us
688                                  */
689                                 (fs.object->ref_count == 1) &&
690                                 /*
691                                  * No one else can look this object up
692                                  */
693                                 (fs.object->handle == NULL) &&
694                                 /*
695                                  * No other ways to look the object up
696                                  */
697                                 ((fs.object->type == OBJT_DEFAULT) ||
698                                  (fs.object->type == OBJT_SWAP)) &&
699                                 /*
700                                  * We don't chase down the shadow chain
701                                  */
702                                 (fs.object == fs.first_object->backing_object) &&
703
704                                 /*
705                                  * grab the lock if we need to
706                                  */
707                                 (fs.lookup_still_valid ||
708                                  lockmgr(&fs.map->lock, LK_EXCLUSIVE|LK_NOWAIT, curthread) == 0)
709                             ) {
710                                 
711                                 fs.lookup_still_valid = 1;
712                                 /*
713                                  * get rid of the unnecessary page
714                                  */
715                                 vm_page_protect(fs.first_m, VM_PROT_NONE);
716                                 vm_page_free(fs.first_m);
717                                 fs.first_m = NULL;
718
719                                 /*
720                                  * grab the page and put it into the 
721                                  * process's object.  The page is
722                                  * automatically made dirty.
723                                  */
724                                 vm_page_rename(fs.m, fs.first_object, fs.first_pindex);
725                                 fs.first_m = fs.m;
726                                 vm_page_busy(fs.first_m);
727                                 fs.m = NULL;
728                                 mycpu->gd_cnt.v_cow_optim++;
729                         } else {
730                                 /*
731                                  * Oh, well, let's copy it.
732                                  */
733                                 vm_page_copy(fs.m, fs.first_m);
734                         }
735
736                         if (fs.m) {
737                                 /*
738                                  * We no longer need the old page or object.
739                                  */
740                                 release_page(&fs);
741                         }
742
743                         /*
744                          * fs.object != fs.first_object due to above 
745                          * conditional
746                          */
747
748                         vm_object_pip_wakeup(fs.object);
749
750                         /*
751                          * Only use the new page below...
752                          */
753
754                         mycpu->gd_cnt.v_cow_faults++;
755                         fs.m = fs.first_m;
756                         fs.object = fs.first_object;
757                         fs.pindex = fs.first_pindex;
758
759                 } else {
760                         prot &= ~VM_PROT_WRITE;
761                 }
762         }
763
764         /*
765          * We must verify that the maps have not changed since our last
766          * lookup.
767          */
768
769         if (!fs.lookup_still_valid &&
770                 (fs.map->timestamp != map_generation)) {
771                 vm_object_t retry_object;
772                 vm_pindex_t retry_pindex;
773                 vm_prot_t retry_prot;
774
775                 /*
776                  * Since map entries may be pageable, make sure we can take a
777                  * page fault on them.
778                  */
779
780                 /*
781                  * Unlock the vnode before the lookup to avoid a deadlock,
782                  * e.g. a deadlock between the inode and exec_map that can
783                  * occur due to locks being obtained in different orders.
784                  */
785
786                 if (fs.vp != NULL) {
787                         vput(fs.vp);
788                         fs.vp = NULL;
789                 }
790                 
791                 if (fs.map->infork) {
792                         release_page(&fs);
793                         unlock_and_deallocate(&fs);
794                         goto RetryFault;
795                 }
796
797                 /*
798                  * To avoid trying to write_lock the map while another process
799                  * has it read_locked (in vm_map_wire), we do not try for
800                  * write permission.  If the page is still writable, we will
801                  * get write permission.  If it is not, or has been marked
802                  * needs_copy, we enter the mapping without write permission,
803                  * and will merely take another fault.
804                  */
805                 result = vm_map_lookup(&fs.map, vaddr, fault_type & ~VM_PROT_WRITE,
806                     &fs.entry, &retry_object, &retry_pindex, &retry_prot, &wired);
807                 map_generation = fs.map->timestamp;
808
809                 /*
810                  * If we don't need the page any longer, put it on the active
811                  * list (the easiest thing to do here).  If no one needs it,
812                  * pageout will grab it eventually.
813                  */
814
815                 if (result != KERN_SUCCESS) {
816                         release_page(&fs);
817                         unlock_and_deallocate(&fs);
818                         return (result);
819                 }
820                 fs.lookup_still_valid = TRUE;
821
822                 if ((retry_object != fs.first_object) ||
823                     (retry_pindex != fs.first_pindex)) {
824                         release_page(&fs);
825                         unlock_and_deallocate(&fs);
826                         goto RetryFault;
827                 }
828                 /*
829                  * Check whether the protection has changed or the object has
830                  * been copied while we left the map unlocked. Changing from
831                  * read to write permission is OK - we leave the page
832                  * write-protected, and catch the write fault. Changing from
833                  * write to read permission means that we can't mark the page
834                  * write-enabled after all.
835                  */
836                 prot &= retry_prot;
837         }
838
839         /*
840          * Put this page into the physical map. We had to do the unlock above
841          * because pmap_enter may cause other faults.   We don't put the page
842          * back on the active queue until later so that the page-out daemon
843          * won't find us (yet).
844          */
845
846         if (prot & VM_PROT_WRITE) {
847                 vm_page_flag_set(fs.m, PG_WRITEABLE);
848                 vm_object_set_writeable_dirty(fs.m->object);
849
850                 /*
851                  * If the fault is a write, we know that this page is being
852                  * written NOW so dirty it explicitly to save on 
853                  * pmap_is_modified() calls later.
854                  *
855                  * If this is a NOSYNC mmap we do not want to set PG_NOSYNC
856                  * if the page is already dirty to prevent data written with
857                  * the expectation of being synced from not being synced.
858                  * Likewise if this entry does not request NOSYNC then make
859                  * sure the page isn't marked NOSYNC.  Applications sharing
860                  * data should use the same flags to avoid ping-ponging.
861                  *
862                  * Also tell the backing pager, if any, that it should remove
863                  * any swap backing since the page is now dirty.
864                  */
865                 if (fs.entry->eflags & MAP_ENTRY_NOSYNC) {
866                         if (fs.m->dirty == 0)
867                                 vm_page_flag_set(fs.m, PG_NOSYNC);
868                 } else {
869                         vm_page_flag_clear(fs.m, PG_NOSYNC);
870                 }
871                 if (fault_flags & VM_FAULT_DIRTY) {
872                         crit_enter();
873                         vm_page_dirty(fs.m);
874                         vm_pager_page_unswapped(fs.m);
875                         crit_exit();
876                 }
877         }
878
879         /*
880          * Page had better still be busy
881          */
882
883         KASSERT(fs.m->flags & PG_BUSY,
884                 ("vm_fault: page %p not busy!", fs.m));
885
886         unlock_things(&fs);
887
888         /*
889          * Sanity check: page must be completely valid or it is not fit to
890          * map into user space.  vm_pager_get_pages() ensures this.
891          */
892
893         if (fs.m->valid != VM_PAGE_BITS_ALL) {
894                 vm_page_zero_invalid(fs.m, TRUE);
895                 printf("Warning: page %p partially invalid on fault\n", fs.m);
896         }
897
898         pmap_enter(fs.map->pmap, vaddr, fs.m, prot, wired);
899
900         if (((fault_flags & VM_FAULT_WIRE_MASK) == 0) && (wired == 0)) {
901                 pmap_prefault(fs.map->pmap, vaddr, fs.entry);
902         }
903
904         vm_page_flag_clear(fs.m, PG_ZERO);
905         vm_page_flag_set(fs.m, PG_MAPPED|PG_REFERENCED);
906         if (fault_flags & VM_FAULT_HOLD)
907                 vm_page_hold(fs.m);
908
909         /*
910          * If the page is not wired down, then put it where the pageout daemon
911          * can find it.
912          */
913
914         if (fault_flags & VM_FAULT_WIRE_MASK) {
915                 if (wired)
916                         vm_page_wire(fs.m);
917                 else
918                         vm_page_unwire(fs.m, 1);
919         } else {
920                 vm_page_activate(fs.m);
921         }
922
923         if (curproc && (curproc->p_flag & P_SWAPPEDOUT) == 0 &&
924             curproc->p_stats) {
925                 if (hardfault) {
926                         curproc->p_stats->p_ru.ru_majflt++;
927                 } else {
928                         curproc->p_stats->p_ru.ru_minflt++;
929                 }
930         }
931
932         /*
933          * Unlock everything, and return
934          */
935
936         vm_page_wakeup(fs.m);
937         vm_object_deallocate(fs.first_object);
938
939         return (KERN_SUCCESS);
940
941 }
942
943 /*
944  * quick version of vm_fault
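 *
 * Touch the byte at the given address with the requested access so that
 * the page is faulted in through the normal fault path: read a byte and,
 * for a write fault, store the same byte back.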
945  */
946 int
947 vm_fault_quick(caddr_t v, int prot)
948 {
949         int r;
950
951         if (prot & VM_PROT_WRITE)
952                 r = subyte(v, fubyte(v));
953         else
954                 r = fubyte(v);
955         return(r);
956 }
957
958 /*
959  * Wire down a range of virtual addresses in a map.  The entry in question
960  * should be marked in-transition and the map must be locked.  We must
961  * release the map temporarily while faulting-in the page to avoid a
962  * deadlock.  Note that the entry may be clipped while we are blocked but
963  * will never be freed.
964  */
965 int
966 vm_fault_wire(vm_map_t map, vm_map_entry_t entry, boolean_t user_wire)
967 {
968         boolean_t fictitious;
969         vm_offset_t start;
970         vm_offset_t end;
971         vm_offset_t va;
972         vm_paddr_t pa;
973         pmap_t pmap;
974         int rv;
975
976         pmap = vm_map_pmap(map);
977         start = entry->start;
978         end = entry->end;
979         fictitious = entry->object.vm_object &&
980                         (entry->object.vm_object->type == OBJT_DEVICE);
981
982         vm_map_unlock(map);
983         map->timestamp++;
984
985         /*
986          * We simulate a fault to get the page and enter it in the physical
987          * map.
988          */
989         for (va = start; va < end; va += PAGE_SIZE) {
990                 if (user_wire) {
991                         rv = vm_fault(map, va, VM_PROT_READ, 
992                                         VM_FAULT_USER_WIRE);
993                 } else {
994                         rv = vm_fault(map, va, VM_PROT_READ|VM_PROT_WRITE,
995                                         VM_FAULT_CHANGE_WIRING);
996                 }
997                 if (rv) {
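                        /*
                         * The fault failed; unwind the wiring of any pages
                         * already entered in this range before returning
                         * the error.
                         */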
998                         while (va > start) {
999                                 va -= PAGE_SIZE;
1000                                 if ((pa = pmap_extract(pmap, va)) == 0)
1001                                         continue;
1002                                 pmap_change_wiring(pmap, va, FALSE);
1003                                 if (!fictitious)
1004                                         vm_page_unwire(PHYS_TO_VM_PAGE(pa), 1);
1005                         }
1006                         vm_map_lock(map);
1007                         return (rv);
1008                 }
1009         }
1010         vm_map_lock(map);
1011         return (KERN_SUCCESS);
1012 }
1013
1014 /*
1015  * Unwire a range of virtual addresses in a map.  The map should be
1016  * locked.
1017  */
1018 void
1019 vm_fault_unwire(vm_map_t map, vm_map_entry_t entry)
1020 {
1021         boolean_t fictitious;
1022         vm_offset_t start;
1023         vm_offset_t end;
1024         vm_offset_t va;
1025         vm_paddr_t pa;
1026         pmap_t pmap;
1027
1028         pmap = vm_map_pmap(map);
1029         start = entry->start;
1030         end = entry->end;
1031         fictitious = entry->object.vm_object &&
1032                         (entry->object.vm_object->type == OBJT_DEVICE);
1033
1034         /*
1035          * Since the pages are wired down, we must be able to get their
1036          * mappings from the physical map system.
1037          */
1038         for (va = start; va < end; va += PAGE_SIZE) {
1039                 pa = pmap_extract(pmap, va);
1040                 if (pa != 0) {
1041                         pmap_change_wiring(pmap, va, FALSE);
1042                         if (!fictitious)
1043                                 vm_page_unwire(PHYS_TO_VM_PAGE(pa), 1);
1044                 }
1045         }
1046 }
1047
1048 /*
1049  * Reduce the rate at which memory is allocated to a process based
1050  * on the perceived load on the VM system. As the load increases
1051  * the allocation burst rate goes down and the delay increases. 
1052  *
1053  * Rate limiting does not apply when faulting active or inactive
1054  * pages.  When faulting 'cache' pages, rate limiting only applies
1055  * if the system currently has a severe page deficit.
1056  *
1057  * XXX vm_pagesupply should be increased when a page is freed.
1058  *
1059  * We sleep up to 1/10 of a second.
1060  */
1061 static int
1062 vm_fault_ratelimit(struct vmspace *vmspace)
1063 {
1064         if (vm_load_enable == 0)
1065                 return(0);
1066         if (vmspace->vm_pagesupply > 0) {
1067                 --vmspace->vm_pagesupply;
1068                 return(0);
1069         }
1070 #ifdef INVARIANTS
1071         if (vm_load_debug) {
1072                 printf("load %-4d give %d pgs, wait %d, pid %-5d (%s)\n",
1073                         vm_load, 
1074                         (1000 - vm_load ) / 10, vm_load * hz / 10000,
1075                         curproc->p_pid, curproc->p_comm);
1076         }
1077 #endif
1078         vmspace->vm_pagesupply = (1000 - vm_load) / 10;
1079         return(vm_load * hz / 10000);
1080 }
1081
1082 /*
1083  *      Routine:
1084  *              vm_fault_copy_entry
1085  *      Function:
1086  *              Copy all of the pages from a wired-down map entry to another.
1087  *
1088  *      In/out conditions:
1089  *              The source and destination maps must be locked for write.
1090  *              The source map entry must be wired down (or be a sharing map
1091  *              entry corresponding to a main map entry that is wired down).
1092  */
1093
1094 void
1095 vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
1096     vm_map_entry_t dst_entry, vm_map_entry_t src_entry)
1097 {
1098         vm_object_t dst_object;
1099         vm_object_t src_object;
1100         vm_ooffset_t dst_offset;
1101         vm_ooffset_t src_offset;
1102         vm_prot_t prot;
1103         vm_offset_t vaddr;
1104         vm_page_t dst_m;
1105         vm_page_t src_m;
1106
1107 #ifdef  lint
1108         src_map++;
1109 #endif  /* lint */
1110
1111         src_object = src_entry->object.vm_object;
1112         src_offset = src_entry->offset;
1113
1114         /*
1115          * Create the top-level object for the destination entry. (Doesn't
1116          * actually shadow anything - we copy the pages directly.)
1117          */
1118         dst_object = vm_object_allocate(OBJT_DEFAULT,
1119             (vm_size_t) OFF_TO_IDX(dst_entry->end - dst_entry->start));
1120
1121         dst_entry->object.vm_object = dst_object;
1122         dst_entry->offset = 0;
1123
1124         prot = dst_entry->max_protection;
1125
1126         /*
1127          * Loop through all of the pages in the entry's range, copying each
1128          * one from the source object (it should be there) to the destination
1129          * object.
1130          */
1131         for (vaddr = dst_entry->start, dst_offset = 0;
1132             vaddr < dst_entry->end;
1133             vaddr += PAGE_SIZE, dst_offset += PAGE_SIZE) {
1134
1135                 /*
1136                  * Allocate a page in the destination object
1137                  */
1138                 do {
1139                         dst_m = vm_page_alloc(dst_object,
1140                                 OFF_TO_IDX(dst_offset), VM_ALLOC_NORMAL);
1141                         if (dst_m == NULL) {
1142                                 vm_wait();
1143                         }
1144                 } while (dst_m == NULL);
1145
1146                 /*
1147                  * Find the page in the source object, and copy it in.
1148                  * (Because the source is wired down, the page will be in
1149                  * memory.)
1150                  */
1151                 src_m = vm_page_lookup(src_object,
1152                         OFF_TO_IDX(dst_offset + src_offset));
1153                 if (src_m == NULL)
1154                         panic("vm_fault_copy_wired: page missing");
1155
1156                 vm_page_copy(src_m, dst_m);
1157
1158                 /*
1159                  * Enter it in the pmap...
1160                  */
1161
1162                 vm_page_flag_clear(dst_m, PG_ZERO);
1163                 pmap_enter(dst_map->pmap, vaddr, dst_m, prot, FALSE);
1164                 vm_page_flag_set(dst_m, PG_WRITEABLE|PG_MAPPED);
1165
1166                 /*
1167                  * Mark it no longer busy, and put it on the active list.
1168                  */
1169                 vm_page_activate(dst_m);
1170                 vm_page_wakeup(dst_m);
1171         }
1172 }
1173
1174
1175 /*
1176  * This routine checks around the requested page for other pages that
1177  * might also be faulted in.  This routine brackets the viable
1178  * range of pages to be paged in.
1179  *
1180  * Inputs:
1181  *      m, rbehind, rahead
1182  *
1183  * Outputs:
1184  *  marray (array of vm_page_t), reqpage (index of requested page)
1185  *
1186  * Return value:
1187  *  number of pages in marray
1188  */
1189 static int
1190 vm_fault_additional_pages(vm_page_t m, int rbehind, int rahead,
1191     vm_page_t *marray, int *reqpage)
1192 {
1193         int i,j;
1194         vm_object_t object;
1195         vm_pindex_t pindex, startpindex, endpindex, tpindex;
1196         vm_page_t rtm;
1197         int cbehind, cahead;
1198
1199         object = m->object;
1200         pindex = m->pindex;
1201
1202         /*
1203          * we don't fault-ahead for device pager
1204          */
1205         if (object->type == OBJT_DEVICE) {
1206                 *reqpage = 0;
1207                 marray[0] = m;
1208                 return 1;
1209         }
1210
1211         /*
1212          * if the requested page is not available, then give up now
1213          */
1214
1215         if (!vm_pager_has_page(object, pindex, &cbehind, &cahead)) {
1216                 return 0;
1217         }
1218
1219         if ((cbehind == 0) && (cahead == 0)) {
1220                 *reqpage = 0;
1221                 marray[0] = m;
1222                 return 1;
1223         }
1224
1225         if (rahead > cahead) {
1226                 rahead = cahead;
1227         }
1228
1229         if (rbehind > cbehind) {
1230                 rbehind = cbehind;
1231         }
1232
1233         /*
1234          * try to do any readahead that we might have free pages for.
1235          */
1236         if ((rahead + rbehind) >
1237                 ((vmstats.v_free_count + vmstats.v_cache_count) - vmstats.v_free_reserved)) {
1238                 pagedaemon_wakeup();
1239                 marray[0] = m;
1240                 *reqpage = 0;
1241                 return 1;
1242         }
1243
1244         /*
1245          * scan backward for the read behind pages -- in memory 
1246          *
1247          * Assume that if the page is not found an interrupt will not
1248          * create it.  Theoretically interrupts can only remove (busy)
1249          * pages, not create new associations.
1250          */
1251         if (pindex > 0) {
1252                 if (rbehind > pindex) {
1253                         rbehind = pindex;
1254                         startpindex = 0;
1255                 } else {
1256                         startpindex = pindex - rbehind;
1257                 }
1258
1259                 crit_enter();
1260                 for ( tpindex = pindex - 1; tpindex >= startpindex; tpindex -= 1) {
1261                         if (vm_page_lookup( object, tpindex)) {
1262                                 startpindex = tpindex + 1;
1263                                 break;
1264                         }
1265                         if (tpindex == 0)
1266                                 break;
1267                 }
1268
1269                 for(i = 0, tpindex = startpindex; tpindex < pindex; i++, tpindex++) {
1270
1271                         rtm = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL);
1272                         if (rtm == NULL) {
1273                                 crit_exit();
1274                                 for (j = 0; j < i; j++) {
1275                                         vm_page_free(marray[j]);
1276                                 }
1277                                 marray[0] = m;
1278                                 *reqpage = 0;
1279                                 return 1;
1280                         }
1281
1282                         marray[i] = rtm;
1283                 }
1284                 crit_exit();
1285         } else {
1286                 startpindex = 0;
1287                 i = 0;
1288         }
1289
1290         marray[i] = m;
1291         /* index of the required page in marray */
1292         *reqpage = i;
1293
1294         tpindex = pindex + 1;
1295         i++;
1296
1297         /*
1298          * scan forward for the read ahead pages
1299          */
1300         endpindex = tpindex + rahead;
1301         if (endpindex > object->size)
1302                 endpindex = object->size;
1303
1304         crit_enter();
1305         for( ; tpindex < endpindex; i++, tpindex++) {
1306
1307                 if (vm_page_lookup(object, tpindex)) {
1308                         break;
1309                 }
1310
1311                 rtm = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL);
1312                 if (rtm == NULL) {
1313                         break;
1314                 }
1315
1316                 marray[i] = rtm;
1317         }
1318         crit_exit();
1319
1320         /* return number of pages */
1321         return i;
1322 }