kernel - Performance improvements during heavy memory/IO use
[dragonfly.git] / sys / vm / vm_pageout.c
1 /*
2  * (MPSAFE)
3  *
4  * Copyright (c) 1991 Regents of the University of California.
5  * All rights reserved.
6  * Copyright (c) 1994 John S. Dyson
7  * All rights reserved.
8  * Copyright (c) 1994 David Greenman
9  * All rights reserved.
10  *
11  * This code is derived from software contributed to Berkeley by
12  * The Mach Operating System project at Carnegie-Mellon University.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions
16  * are met:
17  * 1. Redistributions of source code must retain the above copyright
18  *    notice, this list of conditions and the following disclaimer.
19  * 2. Redistributions in binary form must reproduce the above copyright
20  *    notice, this list of conditions and the following disclaimer in the
21  *    documentation and/or other materials provided with the distribution.
22  * 4. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  *      from: @(#)vm_pageout.c  7.4 (Berkeley) 5/7/91
39  *
40  *
41  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
42  * All rights reserved.
43  *
44  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
45  *
46  * Permission to use, copy, modify and distribute this software and
47  * its documentation is hereby granted, provided that both the copyright
48  * notice and this permission notice appear in all copies of the
49  * software, derivative works or modified versions, and any portions
50  * thereof, and that both notices appear in supporting documentation.
51  *
52  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
53  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
54  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
55  *
56  * Carnegie Mellon requests users of this software to return to
57  *
58  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
59  *  School of Computer Science
60  *  Carnegie Mellon University
61  *  Pittsburgh PA 15213-3890
62  *
63  * any improvements or extensions that they make and grant Carnegie the
64  * rights to redistribute these changes.
65  *
66  * $FreeBSD: src/sys/vm/vm_pageout.c,v 1.151.2.15 2002/12/29 18:21:04 dillon Exp $
67  */
68
69 /*
70  *      The proverbial page-out daemon.
71  */
72
73 #include "opt_vm.h"
74 #include <sys/param.h>
75 #include <sys/systm.h>
76 #include <sys/kernel.h>
77 #include <sys/proc.h>
78 #include <sys/kthread.h>
79 #include <sys/resourcevar.h>
80 #include <sys/signalvar.h>
81 #include <sys/vnode.h>
82 #include <sys/vmmeter.h>
83 #include <sys/sysctl.h>
84
85 #include <vm/vm.h>
86 #include <vm/vm_param.h>
87 #include <sys/lock.h>
88 #include <vm/vm_object.h>
89 #include <vm/vm_page.h>
90 #include <vm/vm_map.h>
91 #include <vm/vm_pageout.h>
92 #include <vm/vm_pager.h>
93 #include <vm/swap_pager.h>
94 #include <vm/vm_extern.h>
95
96 #include <sys/thread2.h>
97 #include <sys/spinlock2.h>
98 #include <vm/vm_page2.h>
99
100 /*
101  * System initialization
102  */
103
104 /* the kernel process "vm_pageout"*/
105 static int vm_pageout_clean (vm_page_t);
106 static int vm_pageout_free_page_calc (vm_size_t count);
107 struct thread *pagethread;
108
109 #if !defined(NO_SWAPPING)
110 /* the kernel process "vm_daemon"*/
111 static void vm_daemon (void);
112 static struct   thread *vmthread;
113
114 static struct kproc_desc vm_kp = {
115         "vmdaemon",
116         vm_daemon,
117         &vmthread
118 };
119 SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
120 #endif
121
122
123 int vm_pages_needed=0;          /* Event on which pageout daemon sleeps */
124 int vm_pageout_deficit=0;       /* Estimated number of pages deficit */
125 int vm_pageout_pages_needed=0;  /* flag saying that the pageout daemon needs pages */
126
127 #if !defined(NO_SWAPPING)
128 static int vm_pageout_req_swapout;      /* XXX */
129 static int vm_daemon_needed;
130 #endif
131 static int vm_max_launder = 32;
132 static int vm_pageout_stats_max=0, vm_pageout_stats_interval = 0;
133 static int vm_pageout_full_stats_interval = 0;
134 static int vm_pageout_stats_free_max=0, vm_pageout_algorithm=0;
135 static int defer_swap_pageouts=0;
136 static int disable_swap_pageouts=0;
137
138 #if defined(NO_SWAPPING)
139 static int vm_swap_enabled=0;
140 static int vm_swap_idle_enabled=0;
141 #else
142 static int vm_swap_enabled=1;
143 static int vm_swap_idle_enabled=0;
144 #endif
145
146 SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
147         CTLFLAG_RW, &vm_pageout_algorithm, 0, "LRU page mgmt");
148
149 SYSCTL_INT(_vm, OID_AUTO, max_launder,
150         CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");
151
152 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
153         CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");
154
155 SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
156         CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");
157
158 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
159         CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");
160
161 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
162         CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "Not implemented");
163
164 #if defined(NO_SWAPPING)
165 SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
166         CTLFLAG_RD, &vm_swap_enabled, 0, "");
167 SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
168         CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
169 #else
170 SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
171         CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
172 SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
173         CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
174 #endif
175
176 SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
177         CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");
178
179 SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
180         CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");
181
182 static int pageout_lock_miss;
183 SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
184         CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");
185
186 #define VM_PAGEOUT_PAGE_COUNT 16
187 int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;
188
189 int vm_page_max_wired;          /* XXX max # of wired pages system-wide */
190
191 #if !defined(NO_SWAPPING)
192 typedef void freeer_fcn_t (vm_map_t, vm_object_t, vm_pindex_t, int);
193 static void vm_pageout_map_deactivate_pages (vm_map_t, vm_pindex_t);
194 static freeer_fcn_t vm_pageout_object_deactivate_pages;
195 static void vm_req_vmdaemon (void);
196 #endif
197 static void vm_pageout_page_stats(int q);
198
199 /*
200  * vm_pageout_clean:
201  *
202  * Clean the page and remove it from the laundry.  The page must be
203  * busied by the caller; the busy state is consumed (the page is either
204  * woken up here or handed to vm_pageout_flush(), which unbusies it).
205  *
206  * The busy state blocks potential page faults on the page while we
207  * cluster and flush it.
208  */
209 static int
210 vm_pageout_clean(vm_page_t m)
211 {
212         vm_object_t object;
213         vm_page_t mc[2*vm_pageout_page_count];
214         int pageout_count;
215         int error;
216         int ib, is, page_base;
217         vm_pindex_t pindex = m->pindex;
218
219         object = m->object;
220
221         /*
222          * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
223          * with the new swapper, but we could have serious problems paging
224          * out other object types if there is insufficient memory.  
225          *
226          * Unfortunately, checking free memory here is far too late, so the
227          * check has been moved up a procedural level.
228          */
229
230         /*
231          * Don't mess with the page if it's busy, held, or special
232          *
233          * XXX do we really need to check hold_count here?  hold_count
234          * isn't supposed to mess with vm_page ops except prevent the
235          * page from being reused.
236          */
237         if (m->hold_count != 0 || (m->flags & PG_UNMANAGED)) {
238                 vm_page_wakeup(m);
239                 return 0;
240         }
241
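            /*
             * Build the pageout cluster in mc[].  The target page is placed
             * in the middle of the 2x-sized array; the reverse scan below
             * grows the cluster downward from page_base while the forward
             * scan grows it upward, so the final cluster is the contiguous
             * run mc[page_base..page_base+pageout_count-1].
             */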
242         mc[vm_pageout_page_count] = m;
243         pageout_count = 1;
244         page_base = vm_pageout_page_count;
245         ib = 1;
246         is = 1;
247
248         /*
249          * Scan object for clusterable pages.
250          *
251          * We can cluster ONLY if the page is NOT
252          * clean, wired, busy, held, or mapped into a
253          * buffer, and one of the following:
254          * 1) The page is inactive, or a seldom used
255          *    active page.
256          * -or-
257          * 2) we force the issue.
258          *
259          * During heavy mmap/modification loads the pageout
260          * daemon can really fragment the underlying file
261          * due to flushing pages out of order and not trying to
262          * align the clusters (which leaves sporadic out-of-order
263          * holes).  To solve this problem we do the reverse scan
264          * first and attempt to align our cluster, then do a 
265          * forward scan if room remains.
266          */
267
268         vm_object_hold(object);
269 more:
270         while (ib && pageout_count < vm_pageout_page_count) {
271                 vm_page_t p;
272
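                    /*
                     * Stop the reverse scan at the start of the object
                     * (avoid wrapping pindex below zero).
                     */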
273                 if (ib > pindex) {
274                         ib = 0;
275                         break;
276                 }
277
278                 p = vm_page_lookup_busy_try(object, pindex - ib, TRUE, &error);
279                 if (error || p == NULL) {
280                         ib = 0;
281                         break;
282                 }
283                 if ((p->queue - p->pc) == PQ_CACHE ||
284                     (p->flags & PG_UNMANAGED)) {
285                         vm_page_wakeup(p);
286                         ib = 0;
287                         break;
288                 }
289                 vm_page_test_dirty(p);
290                 if ((p->dirty & p->valid) == 0 ||
291                     p->queue - p->pc != PQ_INACTIVE ||
292                     p->wire_count != 0 ||       /* may be held by buf cache */
293                     p->hold_count != 0) {       /* may be undergoing I/O */
294                         vm_page_wakeup(p);
295                         ib = 0;
296                         break;
297                 }
298                 mc[--page_base] = p;
299                 ++pageout_count;
300                 ++ib;
301                 /*
302                  * alignment boundary, stop here and switch directions.  Do
303                  * not clear ib.
304                  */
305                 if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
306                         break;
307         }
308
309         while (pageout_count < vm_pageout_page_count && 
310             pindex + is < object->size) {
311                 vm_page_t p;
312
313                 p = vm_page_lookup_busy_try(object, pindex + is, TRUE, &error);
314                 if (error || p == NULL)
315                         break;
316                 if (((p->queue - p->pc) == PQ_CACHE) ||
317                     (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
318                         vm_page_wakeup(p);
319                         break;
320                 }
321                 vm_page_test_dirty(p);
322                 if ((p->dirty & p->valid) == 0 ||
323                     p->queue - p->pc != PQ_INACTIVE ||
324                     p->wire_count != 0 ||       /* may be held by buf cache */
325                     p->hold_count != 0) {       /* may be undergoing I/O */
326                         vm_page_wakeup(p);
327                         break;
328                 }
329                 mc[page_base + pageout_count] = p;
330                 ++pageout_count;
331                 ++is;
332         }
333
334         /*
335          * If we exhausted our forward scan, continue with the reverse scan
336          * when possible, even past a page boundary.  This catches boundary
337          * conditions.
338          */
339         if (ib && pageout_count < vm_pageout_page_count)
340                 goto more;
341
342         vm_object_drop(object);
343
344         /*
345          * we allow reads during pageouts...
346          */
347         return vm_pageout_flush(&mc[page_base], pageout_count, 0);
348 }
349
350 /*
351  * vm_pageout_flush() - launder the given pages
352  *
353  *      The given pages are laundered.  Note that we set up for the start of
354  *      I/O (i.e. busy the page), mark it read-only, and bump the object's
355  *      paging-in-progress count here rather than in the parent.  If we want
356  *      the parent to do more sophisticated things we may have to change
357  *      the ordering.
358  *
359  *      The pages in the array must be busied by the caller and will be
360  *      unbusied by this function.
361  */
362 int
363 vm_pageout_flush(vm_page_t *mc, int count, int flags)
364 {
365         vm_object_t object;
366         int pageout_status[count];
367         int numpagedout = 0;
368         int i;
369
370         /*
371          * Initiate I/O.  Bump the vm_page_t->busy counter.
372          */
373         for (i = 0; i < count; i++) {
374                 KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
375                         ("vm_pageout_flush page %p index %d/%d: partially "
376                          "invalid page", mc[i], i, count));
377                 vm_page_io_start(mc[i]);
378         }
379
380         /*
381          * We must make the pages read-only.  This will also force the
382          * modified bit in the related pmaps to be cleared.  The pager
383          * cannot clear the bit for us since the I/O completion code
384          * typically runs from an interrupt.  The act of making the page
385          * read-only handles the case for us.
386          *
387  *      Then we can unbusy the pages; we still hold a reference by virtue
388          * of our soft-busy.
389          */
390         for (i = 0; i < count; i++) {
391                 vm_page_protect(mc[i], VM_PROT_READ);
392                 vm_page_wakeup(mc[i]);
393         }
394
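            /*
             * All pages in the cluster belong to the same object.  Bump the
             * object's paging-in-progress count so it is not collapsed or
             * terminated while the I/O is in flight.
             */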
395         object = mc[0]->object;
396         vm_object_pip_add(object, count);
397
398         vm_pager_put_pages(object, mc, count,
399             (flags | ((object == &kernel_object) ? VM_PAGER_PUT_SYNC : 0)),
400             pageout_status);
401
402         for (i = 0; i < count; i++) {
403                 vm_page_t mt = mc[i];
404
405                 switch (pageout_status[i]) {
406                 case VM_PAGER_OK:
407                         numpagedout++;
408                         break;
409                 case VM_PAGER_PEND:
410                         numpagedout++;
411                         break;
412                 case VM_PAGER_BAD:
413                         /*
414                          * Page outside of range of object. Right now we
415                          * essentially lose the changes by pretending it
416                          * worked.
417                          */
418                         vm_page_busy_wait(mt, FALSE, "pgbad");
419                         pmap_clear_modify(mt);
420                         vm_page_undirty(mt);
421                         vm_page_wakeup(mt);
422                         break;
423                 case VM_PAGER_ERROR:
424                 case VM_PAGER_FAIL:
425                         /*
426                          * A page typically cannot be paged out when we
427                          * have run out of swap.  We leave the page
428                          * marked inactive and will try to page it out
429                          * again later.
430                          *
431                          * Starvation of the active page list is used to
432                          * determine when the system is massively memory
433                          * starved.
434                          */
435                         break;
436                 case VM_PAGER_AGAIN:
437                         break;
438                 }
439
440                 /*
441                  * If the operation is still going, leave the page busy to
442                  * block all other accesses. Also, leave the paging in
443                  * progress indicator set so that we don't attempt an object
444                  * collapse.
445                  *
446                  * For any page which has completed synchronously,
447                  * deactivate it if we are under a severe deficit.
448                  * Do not try to enter it into the cache, though; it
449                  * might still be read-heavy.
450                  */
451                 if (pageout_status[i] != VM_PAGER_PEND) {
452                         vm_page_busy_wait(mt, FALSE, "pgouw");
453                         if (vm_page_count_severe())
454                                 vm_page_deactivate(mt);
455 #if 0
456                         if (!vm_page_count_severe() || !vm_page_try_to_cache(mt))
457                                 vm_page_protect(mt, VM_PROT_READ);
458 #endif
459                         vm_page_io_finish(mt);
460                         vm_page_wakeup(mt);
461                         vm_object_pip_wakeup(object);
462                 }
463         }
464         return numpagedout;
465 }
466
467 #if !defined(NO_SWAPPING)
468 /*
469  * Deactivate enough pages to satisfy the inactive target
470  * requirements, or, if vm_page_proc_limit is set,
471  * deactivate all of the pages in the object and its
472  * backing_objects.
473  *
474  * The map must be locked.
475  * The caller must hold the vm_object.
476  */
477 static int vm_pageout_object_deactivate_pages_callback(vm_page_t, void *);
478
479 static void
480 vm_pageout_object_deactivate_pages(vm_map_t map, vm_object_t object,
481                                    vm_pindex_t desired, int map_remove_only)
482 {
483         struct rb_vm_page_scan_info info;
484         vm_object_t lobject;
485         vm_object_t tobject;
486         int remove_mode;
487
488         ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
489         lobject = object;
490
491         while (lobject) {
492                 if (pmap_resident_count(vm_map_pmap(map)) <= desired)
493                         break;
494                 if (lobject->type == OBJT_DEVICE || lobject->type == OBJT_PHYS)
495                         break;
496                 if (lobject->paging_in_progress)
497                         break;
498
499                 remove_mode = map_remove_only;
500                 if (lobject->shadow_count > 1)
501                         remove_mode = 1;
502
503                 /*
504                  * Scan the object's entire memory queue.  We hold the
505                  * object's token so the scan should not race anything.
506                  */
507                 info.limit = remove_mode;
508                 info.map = map;
509                 info.desired = desired;
510                 vm_page_rb_tree_RB_SCAN(&lobject->rb_memq, NULL,
511                                 vm_pageout_object_deactivate_pages_callback,
512                                 &info
513                 );
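                    /*
                     * Step down to the backing object.  vm_object_hold() can
                     * block, so re-check that the backing object did not
                     * change while we acquired the hold; if it did, drop it
                     * and retry.
                     */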
514                 while ((tobject = lobject->backing_object) != NULL) {
515                         KKASSERT(tobject != object);
516                         vm_object_hold(tobject);
517                         if (tobject == lobject->backing_object)
518                                 break;
519                         vm_object_drop(tobject);
520                 }
521                 if (lobject != object) {
522                         vm_object_lock_swap();
523                         vm_object_drop(lobject);
524                 }
525                 lobject = tobject;
526         }
527         if (lobject != object)
528                 vm_object_drop(lobject);
529 }
530
531 /*
532  * The caller must hold the vm_object.
533  */
534 static int
535 vm_pageout_object_deactivate_pages_callback(vm_page_t p, void *data)
536 {
537         struct rb_vm_page_scan_info *info = data;
538         int actcount;
539
540         if (pmap_resident_count(vm_map_pmap(info->map)) <= info->desired) {
541                 return(-1);
542         }
543         mycpu->gd_cnt.v_pdpages++;
544
545         if (vm_page_busy_try(p, TRUE))
546                 return(0);
547         if (p->wire_count || p->hold_count || (p->flags & PG_UNMANAGED)) {
548                 vm_page_wakeup(p);
549                 return(0);
550         }
551         if (!pmap_page_exists_quick(vm_map_pmap(info->map), p)) {
552                 vm_page_wakeup(p);
553                 return(0);
554         }
555
556         actcount = pmap_ts_referenced(p);
557         if (actcount) {
558                 vm_page_flag_set(p, PG_REFERENCED);
559         } else if (p->flags & PG_REFERENCED) {
560                 actcount = 1;
561         }
562
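            /*
             * Referenced pages not already on the active queue are
             * reactivated.  Pages on the active queue either age toward
             * deactivation or, if referenced, have their activity advanced
             * and are requeued.  Pages on the inactive queue are unmapped
             * (VM_PROT_NONE) to reduce the pmap's resident count.
             */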
563         vm_page_and_queue_spin_lock(p);
564         if (p->queue - p->pc != PQ_ACTIVE && (p->flags & PG_REFERENCED)) {
565                 vm_page_and_queue_spin_unlock(p);
566                 vm_page_activate(p);
567                 p->act_count += actcount;
568                 vm_page_flag_clear(p, PG_REFERENCED);
569         } else if (p->queue - p->pc == PQ_ACTIVE) {
570                 if ((p->flags & PG_REFERENCED) == 0) {
571                         p->act_count -= min(p->act_count, ACT_DECLINE);
572                         if (!info->limit &&
573                             (vm_pageout_algorithm || (p->act_count == 0))) {
574                                 vm_page_and_queue_spin_unlock(p);
575                                 vm_page_protect(p, VM_PROT_NONE);
576                                 vm_page_deactivate(p);
577                         } else {
578                                 TAILQ_REMOVE(&vm_page_queues[p->queue].pl,
579                                              p, pageq);
580                                 TAILQ_INSERT_TAIL(&vm_page_queues[p->queue].pl,
581                                                   p, pageq);
582                                 vm_page_and_queue_spin_unlock(p);
583                         }
584                 } else {
585                         vm_page_and_queue_spin_unlock(p);
586                         vm_page_activate(p);
587                         vm_page_flag_clear(p, PG_REFERENCED);
588
589                         vm_page_and_queue_spin_lock(p);
590                         if (p->queue - p->pc == PQ_ACTIVE) {
591                                 if (p->act_count < (ACT_MAX - ACT_ADVANCE))
592                                         p->act_count += ACT_ADVANCE;
593                                 TAILQ_REMOVE(&vm_page_queues[p->queue].pl,
594                                              p, pageq);
595                                 TAILQ_INSERT_TAIL(&vm_page_queues[p->queue].pl,
596                                                   p, pageq);
597                         }
598                         vm_page_and_queue_spin_unlock(p);
599                 }
600         } else if (p->queue - p->pc == PQ_INACTIVE) {
601                 vm_page_and_queue_spin_unlock(p);
602                 vm_page_protect(p, VM_PROT_NONE);
603         } else {
604                 vm_page_and_queue_spin_unlock(p);
605         }
606         vm_page_wakeup(p);
607         return(0);
608 }
609
610 /*
611  * Deactivate some number of pages in a map; try to do it fairly, but
612  * that is really hard to do.
613  */
614 static void
615 vm_pageout_map_deactivate_pages(vm_map_t map, vm_pindex_t desired)
616 {
617         vm_map_entry_t tmpe;
618         vm_object_t obj, bigobj;
619         int nothingwired;
620
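            /*
             * Acquire the map lock without blocking; if it is contested,
             * skip this map rather than stall the daemon.
             */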
621         if (lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT)) {
622                 return;
623         }
624
625         bigobj = NULL;
626         nothingwired = TRUE;
627
628         /*
629          * First, search out the biggest object and try to free pages from
630          * that.
631          */
632         tmpe = map->header.next;
633         while (tmpe != &map->header) {
634                 switch(tmpe->maptype) {
635                 case VM_MAPTYPE_NORMAL:
636                 case VM_MAPTYPE_VPAGETABLE:
637                         obj = tmpe->object.vm_object;
638                         if ((obj != NULL) && (obj->shadow_count <= 1) &&
639                                 ((bigobj == NULL) ||
640                                  (bigobj->resident_page_count < obj->resident_page_count))) {
641                                 bigobj = obj;
642                         }
643                         break;
644                 default:
645                         break;
646                 }
647                 if (tmpe->wired_count > 0)
648                         nothingwired = FALSE;
649                 tmpe = tmpe->next;
650         }
651
652         if (bigobj)  {
653                 vm_object_hold(bigobj);
654                 vm_pageout_object_deactivate_pages(map, bigobj, desired, 0);
655                 vm_object_drop(bigobj);
656         }
657
658         /*
659          * Next, hunt around for other pages to deactivate.  We actually
660          * do this search sort of wrong -- .text first is not the best idea.
661          */
662         tmpe = map->header.next;
663         while (tmpe != &map->header) {
664                 if (pmap_resident_count(vm_map_pmap(map)) <= desired)
665                         break;
666                 switch(tmpe->maptype) {
667                 case VM_MAPTYPE_NORMAL:
668                 case VM_MAPTYPE_VPAGETABLE:
669                         obj = tmpe->object.vm_object;
670                         if (obj) {
671                                 vm_object_hold(obj);
672                                 vm_pageout_object_deactivate_pages(map, obj, desired, 0);
673                                 vm_object_drop(obj);
674                         }
675                         break;
676                 default:
677                         break;
678                 }
679                 tmpe = tmpe->next;
680         }
681
682         /*
683          * Remove all mappings if a process is swapped out; this will free page
684          * table pages.
685          */
686         if (desired == 0 && nothingwired)
687                 pmap_remove(vm_map_pmap(map),
688                             VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
689         vm_map_unlock(map);
690 }
691 #endif
692
693 /*
694  * Called when the pageout scan wants to free a page.  We no longer
695  * try to cycle the vm_object here with a reference & dealloc, which can
696  * cause a non-trivial object collapse in a critical path.
697  *
698  * It is unclear why we cycled the ref_count in the past; perhaps it was to
699  * try to optimize shadow chain collapses, but I don't quite see why it would
700  * be necessary.  An OBJ_DEAD object should terminate any and all vm_pages
701  * synchronously and not have to be kick-started.
702  */
703 static void
704 vm_pageout_page_free(vm_page_t m) 
705 {
706         vm_page_protect(m, VM_PROT_NONE);
707         vm_page_free(m);
708 }
709
710 /*
711  * vm_pageout_scan does the dirty work for the pageout daemon.
712  */
713 struct vm_pageout_scan_info {
714         struct proc *bigproc;
715         vm_offset_t bigsize;
716 };
717
718 static int vm_pageout_scan_callback(struct proc *p, void *data);
719
720 static int
721 vm_pageout_scan_inactive(int pass, int q, int inactive_shortage,
722                          int *vnodes_skippedp)
723 {
724         vm_page_t m;
725         struct vm_page marker;
726         struct vnode *vpfailed;         /* warning, allowed to be stale */
727         int maxscan;
728         int delta = 0;
729         vm_object_t object;
730         int actcount;
731         int maxlaunder;
732
733         /*
734          * Start scanning the inactive queue for pages we can move to the
735          * cache or free.  The scan will stop when the target is reached or
736          * we have scanned the entire inactive queue.  Note that m->act_count
737          * is not used to form decisions for the inactive queue, only for the
738          * active queue.
739          *
740          * maxlaunder limits the number of dirty pages we flush per scan.
741          * For most systems a smaller value (16 or 32) is more robust under
742          * extreme memory and disk pressure because any unnecessary writes
743  * to disk can result in extreme performance degradation.  However,
744          * systems with excessive dirty pages (especially when MAP_NOSYNC is
745          * used) will die horribly with limited laundering.  If the pageout
746          * daemon cannot clean enough pages in the first pass, we let it go
747          * all out in succeeding passes.
748          */
749         if ((maxlaunder = vm_max_launder) <= 1)
750                 maxlaunder = 1;
751         if (pass)
752                 maxlaunder = 10000;
753
754         /*
755          * Initialize our marker
756          */
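            /*
             * The marker is a fictitious page inserted into the queue and
             * used as a scan cursor.  It is advanced past each page we
             * examine so the queue spinlock can be dropped between
             * iterations without losing our place.  Scans skip pages
             * flagged PG_MARKER.
             */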
757         bzero(&marker, sizeof(marker));
758         marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
759         marker.queue = PQ_INACTIVE + q;
760         marker.pc = q;
761         marker.wire_count = 1;
762
763         /*
764          * Inactive queue scan.
765          *
766          * NOTE: The vm_page must be spinlocked before the queue to avoid
767          *       deadlocks, so it is easiest to simply iterate the loop
768          *       with the queue unlocked at the top.
769          */
770         vpfailed = NULL;
771
772         vm_page_queues_spin_lock(PQ_INACTIVE + q);
773         TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE + q].pl, &marker, pageq);
774         maxscan = vmstats.v_inactive_count;
775         vm_page_queues_spin_unlock(PQ_INACTIVE + q);
776
777         while ((m = TAILQ_NEXT(&marker, pageq)) != NULL &&
778                maxscan-- > 0 && inactive_shortage - delta > 0)
779         {
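                    /*
                     * After acquiring the page+queue spin locks, verify the
                     * page still immediately follows our marker.  If another
                     * thread moved it, restart with the marker's new
                     * successor (crediting back the scan count).
                     */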
780                 vm_page_and_queue_spin_lock(m);
781                 if (m != TAILQ_NEXT(&marker, pageq)) {
782                         vm_page_and_queue_spin_unlock(m);
783                         ++maxscan;
784                         continue;
785                 }
786                 KKASSERT(m->queue - m->pc == PQ_INACTIVE);
787                 TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE + q].pl,
788                              &marker, pageq);
789                 TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE + q].pl, m,
790                                    &marker, pageq);
791                 mycpu->gd_cnt.v_pdpages++;
792
793                 /*
794                  * Skip marker pages
795                  */
796                 if (m->flags & PG_MARKER) {
797                         vm_page_and_queue_spin_unlock(m);
798                         continue;
799                 }
800
801                 /*
802                  * Try to busy the page.  Don't mess with pages which are
803                  * already busy or reorder them in the queue.
804                  */
805                 if (vm_page_busy_try(m, TRUE)) {
806                         vm_page_and_queue_spin_unlock(m);
807                         continue;
808                 }
809                 vm_page_and_queue_spin_unlock(m);
810                 KKASSERT(m->queue - m->pc == PQ_INACTIVE);
811
812                 lwkt_yield();
813
814                 /*
815                  * The page has been successfully busied and is now no
816                  * longer spinlocked.  The queue is no longer spinlocked
817                  * either.
818                  */
819
820                 /*
821                  * It is possible for a page to be busied ad-hoc (e.g. the
822                  * pmap_collect() code) and wired and race against the
823                  * allocation of a new page.  vm_page_alloc() may be forced
824                  * to deactivate the wired page in which case it winds up
825                  * on the inactive queue and must be handled here.  We
826                  * correct the problem simply by unqueuing the page.
827                  */
828                 if (m->wire_count) {
829                         vm_page_unqueue_nowakeup(m);
830                         vm_page_wakeup(m);
831                         kprintf("WARNING: pagedaemon: wired page on "
832                                 "inactive queue %p\n", m);
833                         continue;
834                 }
835
836                 /*
837                  * A held page may be undergoing I/O, so skip it.
838                  */
839                 if (m->hold_count) {
840                         vm_page_and_queue_spin_lock(m);
841                         if (m->queue - m->pc == PQ_INACTIVE) {
842                                 TAILQ_REMOVE(
843                                         &vm_page_queues[PQ_INACTIVE + q].pl,
844                                         m, pageq);
845                                 TAILQ_INSERT_TAIL(
846                                         &vm_page_queues[PQ_INACTIVE + q].pl,
847                                         m, pageq);
848                         }
849                         vm_page_and_queue_spin_unlock(m);
850                         ++vm_swapcache_inactive_heuristic;
851                         vm_page_wakeup(m);
852                         continue;
853                 }
854
855                 if (m->object->ref_count == 0) {
856                         /*
857                          * If the object is not being used, we ignore previous 
858                          * references.
859                          */
860                         vm_page_flag_clear(m, PG_REFERENCED);
861                         pmap_clear_reference(m);
862                         /* fall through to end */
863                 } else if (((m->flags & PG_REFERENCED) == 0) &&
864                             (actcount = pmap_ts_referenced(m))) {
865                         /*
866                          * Otherwise, if the page has been referenced while 
867                          * in the inactive queue, we bump the "activation
868                          * count" upwards, making it less likely that the
869                          * page will be added back to the inactive queue
870                          * prematurely again.  Here we check the page tables
871                          * (or emulated bits, if any), since the upper level
872                          * VM system does not know anything about existing
873                          * references.
874                          */
875                         vm_page_activate(m);
876                         m->act_count += (actcount + ACT_ADVANCE);
877                         vm_page_wakeup(m);
878                         continue;
879                 }
880
881                 /*
882                  * (m) is still busied.
883                  *
884                  * If the upper level VM system knows about any page 
885                  * references, we activate the page.  We also set the 
886                  * "activation count" higher than normal so that we will be less
887                  * likely to place pages back onto the inactive queue again.
888                  */
889                 if ((m->flags & PG_REFERENCED) != 0) {
890                         vm_page_flag_clear(m, PG_REFERENCED);
891                         actcount = pmap_ts_referenced(m);
892                         vm_page_activate(m);
893                         m->act_count += (actcount + ACT_ADVANCE + 1);
894                         vm_page_wakeup(m);
895                         continue;
896                 }
897
898                 /*
899                  * If the upper level VM system doesn't know anything about 
900                  * the page being dirty, we have to check for it again.  As 
901                  * far as the VM code knows, any partially dirty pages are 
902                  * fully dirty.
903                  *
904                  * Pages marked PG_WRITEABLE may be mapped into the user
905                  * address space of a process running on another cpu.  A
906                  * user process (without holding the MP lock) running on
907                  * another cpu may be able to touch the page while we are
908                  * trying to remove it.  vm_page_cache() will handle this
909                  * case for us.
910                  */
911                 if (m->dirty == 0) {
912                         vm_page_test_dirty(m);
913                 } else {
914                         vm_page_dirty(m);
915                 }
916
917                 if (m->valid == 0) {
918                         /*
919                          * Invalid pages can be easily freed
920                          */
921                         vm_pageout_page_free(m);
922                         mycpu->gd_cnt.v_dfree++;
923                         ++delta;
924                 } else if (m->dirty == 0) {
925                         /*
926                          * Clean pages can be placed onto the cache queue.
927                          * This effectively frees them.
928                          */
929                         vm_page_cache(m);
930                         ++delta;
931                 } else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) {
932                         /*
933                          * Dirty pages need to be paged out, but flushing
934                          * a page is extremely expensive versus freeing
935                          * a clean page.  Rather than artificially limiting
936                          * the number of pages we can flush, we instead give
937                          * dirty pages extra priority on the inactive queue
938                          * by forcing them to be cycled through the queue
939                          * twice before being flushed, after which the 
940                          * (now clean) page will cycle through once more
941                          * before being freed.  This significantly extends
942                          * the thrash point for a heavily loaded machine.
943                          */
944                         vm_page_flag_set(m, PG_WINATCFLS);
945                         vm_page_and_queue_spin_lock(m);
946                         if (m->queue - m->pc == PQ_INACTIVE) {
947                                 TAILQ_REMOVE(
948                                         &vm_page_queues[PQ_INACTIVE + q].pl,
949                                         m, pageq);
950                                 TAILQ_INSERT_TAIL(
951                                         &vm_page_queues[PQ_INACTIVE + q].pl,
952                                         m, pageq);
953                         }
954                         vm_page_and_queue_spin_unlock(m);
955                         ++vm_swapcache_inactive_heuristic;
956                         vm_page_wakeup(m);
957                 } else if (maxlaunder > 0) {
958                         /*
959                          * We always want to try to flush some dirty pages if
960                          * we encounter them, to keep the system stable.
961                          * Normally this number is small, but under extreme
962                          * pressure where there are insufficient clean pages
963                          * on the inactive queue, we may have to go all out.
964                          */
965                         int swap_pageouts_ok;
966                         struct vnode *vp = NULL;
967
968                         object = m->object;
969
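                            /*
                             * Decide whether this page may be paged to swap.
                             * Vnode and other non-swap-backed objects can
                             * always be laundered.  Swap/default objects are
                             * laundered only if swap pageouts are neither
                             * deferred nor disabled, except that a deferral
                             * is overridden when free memory reaches the
                             * minimum threshold.
                             */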
970                         if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
971                                 swap_pageouts_ok = 1;
972                         } else {
973                                 swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
974                                 swap_pageouts_ok |= (!disable_swap_pageouts &&
975                                                      defer_swap_pageouts &&
976                                                      vm_page_count_min(0));
977                         }
978
979                         /*
980                          * We don't bother paging objects that are "dead".  
981                          * Those objects are in a "rundown" state.
982                          */
983                         if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
984                                 vm_page_and_queue_spin_lock(m);
985                                 if (m->queue - m->pc == PQ_INACTIVE) {
986                                         TAILQ_REMOVE(
987                                             &vm_page_queues[PQ_INACTIVE + q].pl,
988                                             m, pageq);
989                                         TAILQ_INSERT_TAIL(
990                                             &vm_page_queues[PQ_INACTIVE + q].pl,
991                                             m, pageq);
992                                 }
993                                 vm_page_and_queue_spin_unlock(m);
994                                 ++vm_swapcache_inactive_heuristic;
995                                 vm_page_wakeup(m);
996                                 continue;
997                         }
998
999                         /*
1000                          * (m) is still busied.
1001                          *
1002                          * The object is already known NOT to be dead.   It
1003                          * is possible for the vget() to block the whole
1004                          * pageout daemon, but the new low-memory handling
1005                          * code should prevent it.
1006                          *
1007                          * The previous code skipped locked vnodes and, worse,
1008                          * reordered pages in the queue.  This results in
1009                          * completely non-deterministic operation because,
1010                          * quite often, a vm_fault has initiated an I/O and
1011                          * is holding a locked vnode at just the point where
1012                          * the pageout daemon is woken up.
1013                          *
1014                          * We can't wait forever for the vnode lock; we might
1015                          * deadlock due to a vn_read() getting stuck in
1016                          * vm_wait while holding this vnode.  We skip the 
1017                          * vnode if we can't get it in a reasonable amount
1018                          * of time.
1019                          *
1020                          * vpfailed is used to (try to) avoid the case where
1021                          * a large number of pages are associated with a
1022                          * locked vnode, which could cause the pageout daemon
1023                          * to stall for an excessive amount of time.
1024                          */
1025                         if (object->type == OBJT_VNODE) {
1026                                 int flags;
1027
1028                                 vp = object->handle;
1029                                 flags = LK_EXCLUSIVE | LK_NOOBJ;
1030                                 if (vp == vpfailed)
1031                                         flags |= LK_NOWAIT;
1032                                 else
1033                                         flags |= LK_TIMELOCK;
1034                                 vm_page_hold(m);
1035                                 vm_page_wakeup(m);
1036
1037                                 /*
1038                                  * We have unbusied (m) temporarily so we can
1039                                  * acquire the vp lock without deadlocking.
1040                                  * (m) is held to prevent destruction.
1041                                  */
1042                                 if (vget(vp, flags) != 0) {
1043                                         vpfailed = vp;
1044                                         ++pageout_lock_miss;
1045                                         if (object->flags & OBJ_MIGHTBEDIRTY)
1046                                                     ++*vnodes_skippedp;
1047                                         vm_page_unhold(m);
1048                                         continue;
1049                                 }
1050
1051                                 /*
1052                                  * The page might have been moved to another
1053                                  * queue during potential blocking in vget()
1054                                  * above.  The page might have been freed and
1055                                  * reused for another vnode.  The object might
1056                                  * have been reused for another vnode.
1057                                  */
1058                                 if (m->queue - m->pc != PQ_INACTIVE ||
1059                                     m->object != object ||
1060                                     object->handle != vp) {
1061                                         if (object->flags & OBJ_MIGHTBEDIRTY)
1062                                                 ++*vnodes_skippedp;
1063                                         vput(vp);
1064                                         vm_page_unhold(m);
1065                                         continue;
1066                                 }
1067         
1068                                 /*
1069                                  * The page may have been busied during the
1070                                  * blocking in vget() above.  We don't move the
1071                                  * page back onto the end of the queue; statistics
1072                                  * are more correct that way.
1073                                  */
1074                                 if (vm_page_busy_try(m, TRUE)) {
1075                                         vput(vp);
1076                                         vm_page_unhold(m);
1077                                         continue;
1078                                 }
1079                                 vm_page_unhold(m);
1080
1081                                 /*
1082                                  * (m) is busied again
1083                                  *
1084                                  * We own the busy bit and have dropped our
1085                                  * hold.  If the page is still held it
1086                                  * might be undergoing I/O, so skip it.
1087                                  */
1088                                 if (m->hold_count) {
1089                                         vm_page_and_queue_spin_lock(m);
1090                                         if (m->queue - m->pc == PQ_INACTIVE) {
1091                                                 TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE + q].pl, m, pageq);
1092                                                 TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE + q].pl, m, pageq);
1093                                         }
1094                                         vm_page_and_queue_spin_unlock(m);
1095                                         ++vm_swapcache_inactive_heuristic;
1096                                         if (object->flags & OBJ_MIGHTBEDIRTY)
1097                                                 ++*vnodes_skippedp;
1098                                         vm_page_wakeup(m);
1099                                         vput(vp);
1100                                         continue;
1101                                 }
1102                                 /* (m) is left busied as we fall through */
1103                         }
1104
1105                         /*
1106                          * page is busy and not held here.
1107                          *
1108                          * If a page is dirty, then it is either being washed
1109                          * (but not yet cleaned) or it is still in the
1110                          * laundry.  If it is still in the laundry, then we
1111                          * start the cleaning operation. 
1112                          *
1113                          * Bump delta on success to account for the (future)
1114                          * cleaned page and reduce the shortage.  Otherwise we
1115                          * could wind up laundering or cleaning too many
1116                          * pages.
1117                          */
1118                         if (vm_pageout_clean(m) != 0) {
1119                                 ++delta;
1120                                 --maxlaunder;
1121                         }
1122                         /* clean ate busy, page no longer accessible */
1123                         if (vp != NULL)
1124                                 vput(vp);
1125                 } else {
1126                         vm_page_wakeup(m);
1127                 }
1128         }
1129         vm_page_queues_spin_lock(PQ_INACTIVE + q);
1130         TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE + q].pl, &marker, pageq);
1131         vm_page_queues_spin_unlock(PQ_INACTIVE + q);
1132
1133         return (delta);
1134 }
1135
1136 static int
1137 vm_pageout_scan_active(int pass, int q,
1138                        int inactive_shortage, int active_shortage,
1139                        int *recycle_countp)
1140 {
1141         struct vm_page marker;
1142         vm_page_t m;
1143         int actcount;
1144         int delta = 0;
1145         int pcount;
1146
1147         /*
1148          * We want to move pages from the active queue to the inactive
1149          * queue to get the inactive queue to the inactive target.  If
1150          * we still have a page shortage from above we try to directly free
1151          * clean pages instead of moving them.
1152          *
1153          * If we do still have a shortage we keep track of the number of
1154          * pages we free or cache (recycle_count) as a measure of thrashing
1155          * between the active and inactive queues.
1156          *
1157          * If we were able to completely satisfy the free+cache targets
1158          * from the inactive pool we limit the number of pages we move
1159          * from the active pool to the inactive pool to 2x the pages we
1160          * had removed from the inactive pool (with a minimum of 1/5 the
1161          * inactive target).  If we were not able to completely satisfy
1162          * the free+cache targets we go for the whole target aggressively.
1163          *
1164          * NOTE: Both variables can end up negative.
1165          * NOTE: We are still in a critical section.
1166          */
1167
1168         bzero(&marker, sizeof(marker));
1169         marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
1170         marker.queue = PQ_ACTIVE + q;
1171         marker.pc = q;
1172         marker.wire_count = 1;
1173
1174         vm_page_queues_spin_lock(PQ_ACTIVE + q);
1175         TAILQ_INSERT_HEAD(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1176         vm_page_queues_spin_unlock(PQ_ACTIVE + q);
1177         pcount = vmstats.v_active_count;
1178
1179         while ((m = TAILQ_NEXT(&marker, pageq)) != NULL &&
1180                pcount-- > 0 && (inactive_shortage - delta > 0 ||
1181                                 active_shortage > 0))
1182         {
1183                 vm_page_and_queue_spin_lock(m);
1184                 if (m != TAILQ_NEXT(&marker, pageq)) {
1185                         vm_page_and_queue_spin_unlock(m);
1186                         ++pcount;
1187                         continue;
1188                 }
1189                 KKASSERT(m->queue - m->pc == PQ_ACTIVE);
1190                 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl,
1191                              &marker, pageq);
1192                 TAILQ_INSERT_AFTER(&vm_page_queues[PQ_ACTIVE + q].pl, m,
1193                                    &marker, pageq);
1194
1195                 /*
1196                  * Skip marker pages
1197                  */
1198                 if (m->flags & PG_MARKER) {
1199                         vm_page_and_queue_spin_unlock(m);
1200                         continue;
1201                 }
1202
1203                 /*
1204                  * Try to busy the page.  Don't mess with pages which are
1205                  * already busy or reorder them in the queue.
1206                  */
1207                 if (vm_page_busy_try(m, TRUE)) {
1208                         vm_page_and_queue_spin_unlock(m);
1209                         continue;
1210                 }
1211
1212                 /*
1213                  * Don't deactivate pages that are held, even if we can
1214                  * busy them.  (XXX why not?)
1215                  */
1216                 if (m->hold_count != 0) {
1217                         TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl,
1218                                      m, pageq);
1219                         TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE + q].pl,
1220                                           m, pageq);
1221                         vm_page_and_queue_spin_unlock(m);
1222                         vm_page_wakeup(m);
1223                         continue;
1224                 }
1225                 vm_page_and_queue_spin_unlock(m);
1226                 lwkt_yield();
1227
1228                 /*
1229                  * The page has been successfully busied and the page and
1230                  * queue are no longer locked.
1231                  */
1232
1233                 /*
1234                  * The count for pagedaemon pages is done after checking the
1235                  * page for eligibility...
1236                  */
1237                 mycpu->gd_cnt.v_pdpages++;
1238
1239                 /*
1240                  * Check to see "how much" the page has been used and clear
1241                  * the tracking access bits.  If the object has no references
1242                  * don't bother paying the expense.
1243                  */
1244                 actcount = 0;
1245                 if (m->object->ref_count != 0) {
1246                         if (m->flags & PG_REFERENCED)
1247                                 ++actcount;
1248                         actcount += pmap_ts_referenced(m);
1249                         if (actcount) {
1250                                 m->act_count += ACT_ADVANCE + actcount;
1251                                 if (m->act_count > ACT_MAX)
1252                                         m->act_count = ACT_MAX;
1253                         }
1254                 }
1255                 vm_page_flag_clear(m, PG_REFERENCED);
1256
1257                 /*
1258                  * actcount is only valid if the object ref_count is non-zero.
1259                  */
1260                 if (actcount && m->object->ref_count != 0) {
1261                         vm_page_and_queue_spin_lock(m);
1262                         if (m->queue - m->pc == PQ_ACTIVE) {
1263                                 TAILQ_REMOVE(
1264                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1265                                         m, pageq);
1266                                 TAILQ_INSERT_TAIL(
1267                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1268                                         m, pageq);
1269                         }
1270                         vm_page_and_queue_spin_unlock(m);
1271                         vm_page_wakeup(m);
1272                 } else {
1273                         m->act_count -= min(m->act_count, ACT_DECLINE);
1274                         if (vm_pageout_algorithm ||
1275                             m->object->ref_count == 0 ||
1276                             m->act_count < pass + 1
1277                         ) {
1278                                 /*
1279                                  * Deactivate the page.  If we had a
1280                                  * shortage from our inactive scan try to
1281                                  * free (cache) the page instead.
1282                                  *
1283                                  * Don't just blindly cache the page if
1284                                  * we do not have a shortage from the
1285                                  * inactive scan; that could lead to
1286                                  * gigabytes being moved.
1287                                  */
1288                                 --active_shortage;
1289                                 if (inactive_shortage - delta > 0 ||
1290                                     m->object->ref_count == 0) {
1291                                         if (inactive_shortage - delta > 0)
1292                                                 ++*recycle_countp;
1293                                         vm_page_protect(m, VM_PROT_NONE);
1294                                         if (m->dirty == 0 &&
1295                                             inactive_shortage - delta > 0) {
1296                                                 ++delta;
1297                                                 vm_page_cache(m);
1298                                         } else {
1299                                                 vm_page_deactivate(m);
1300                                                 vm_page_wakeup(m);
1301                                         }
1302                                 } else {
1303                                         vm_page_deactivate(m);
1304                                         vm_page_wakeup(m);
1305                                 }
1306                         } else {
1307                                 vm_page_and_queue_spin_lock(m);
1308                                 if (m->queue - m->pc == PQ_ACTIVE) {
1309                                         TAILQ_REMOVE(
1310                                             &vm_page_queues[PQ_ACTIVE + q].pl,
1311                                             m, pageq);
1312                                         TAILQ_INSERT_TAIL(
1313                                             &vm_page_queues[PQ_ACTIVE + q].pl,
1314                                             m, pageq);
1315                                 }
1316                                 vm_page_and_queue_spin_unlock(m);
1317                                 vm_page_wakeup(m);
1318                         }
1319                 }
1320         }
1321
1322         /*
1323          * Clean out our local marker.
1324          */
1325         vm_page_queues_spin_lock(PQ_ACTIVE + q);
1326         TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1327         vm_page_queues_spin_unlock(PQ_ACTIVE + q);
1328
1329         return (delta);
1330 }
1331
1332 /*
1333  * The number of actually free pages can drop down to v_free_reserved;
1334  * we try to build the free count back up above v_free_min.  Note that
1335  * vm_paging_needed() also returns TRUE if v_free_count is not at
1336  * least v_free_min, so that is the minimum we must build the free
1337  * count to.
1338  *
1339  * We use a slightly higher target to improve hysteresis,
1340  * ((v_free_target + v_free_min) / 2).  Since v_free_target
1341  * is usually the same as v_cache_min, this keeps roughly half
1342  * as many pages in the free queue as in the cache queue,
1343  * providing pretty good pipelining for pageout operation.
1344  *
1345  * The system operator can manipulate vm.v_cache_min and
1346  * vm.v_free_target to tune the pageout daemon.  Be sure
1347  * to keep vm.v_free_min < vm.v_free_target.
1348  *
1349  * Note that the original paging target is to get at least
1350  * (free_min + cache_min) into (free + cache).  The slightly
1351  * higher target will shift additional pages from cache to free
1352  * without affecting the original paging target in order to
1353  * maintain better hysteresis and not have the free count always
1354  * be dead-on v_free_min.
1355  *
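 * Illustrative (hypothetical) numbers: with v_free_min = 1369 and
 * v_free_target = 6167, the loop below builds the free count up to
 * (1369 + 6167) / 2 = 3768 pages.
 *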
1356  * NOTE: we are still in a critical section.
1357  *
1358  * Pages moved from PQ_CACHE to totally free are not counted in the
1359  * pages_freed counter.
1360  */
1361 static void
1362 vm_pageout_scan_cache(int inactive_shortage,
1363                       int vnodes_skipped, int recycle_count)
1364 {
1365         struct vm_pageout_scan_info info;
1366         vm_page_t m;
1367
1368         while (vmstats.v_free_count <
1369                (vmstats.v_free_min + vmstats.v_free_target) / 2) {
1370                 /*
1371                  * This steals some code from vm/vm_page.c
1372                  */
1373                 static int cache_rover = 0;
1374
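                /*
                 * Pick a page from one of the PQ_CACHE sub-queues.  The
                 * cache_rover is advanced by PQ_PRIME2 below so successive
                 * lookups tend to be spread across the colored sub-queues.
                 */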
1375                 m = vm_page_list_find(PQ_CACHE, cache_rover & PQ_L2_MASK, FALSE);
1376                 if (m == NULL)
1377                         break;
1378                 /* page is returned removed from its queue and spinlocked */
1379                 if (vm_page_busy_try(m, TRUE)) {
1380                         vm_page_deactivate_locked(m);
1381                         vm_page_spin_unlock(m);
1382 #ifdef INVARIANTS
1383                         kprintf("Warning: busy page %p found in cache\n", m);
1384 #endif
1385                         continue;
1386                 }
1387                 vm_page_spin_unlock(m);
1388                 pagedaemon_wakeup();
1389                 lwkt_yield();
1390
1391                 /*
1392                  * Page has been successfully busied and it and its queue
1393                  * are no longer spinlocked.
1394                  */
1395                 if ((m->flags & PG_UNMANAGED) ||
1396                     m->hold_count ||
1397                     m->wire_count) {
1398                         vm_page_deactivate(m);
1399                         vm_page_wakeup(m);
1400                         continue;
1401                 }
1402                 KKASSERT((m->flags & PG_MAPPED) == 0);
1403                 KKASSERT(m->dirty == 0);
1404                 cache_rover += PQ_PRIME2;
1405                 vm_pageout_page_free(m);
1406                 mycpu->gd_cnt.v_dfree++;
1407         }
1408
1409 #if !defined(NO_SWAPPING)
1410         /*
1411          * Idle process swapout -- run once per second.
1412          */
1413         if (vm_swap_idle_enabled) {
1414                 static long lsec;
1415                 if (time_second != lsec) {
1416                         vm_pageout_req_swapout |= VM_SWAP_IDLE;
1417                         vm_req_vmdaemon();
1418                         lsec = time_second;
1419                 }
1420         }
1421 #endif
1422
1423         /*
1424          * If we did not get enough free pages and we skipped a vnode
1425          * in a writeable object, wake up the sync daemon.  Also kick
1426          * off swapout if we are still below the free page target.
1427          */
1428         if (vm_paging_target() > 0) {
1429                 if (vnodes_skipped && vm_page_count_min(0))
1430                         speedup_syncer();
1431 #if !defined(NO_SWAPPING)
1432                 if (vm_swap_enabled && vm_page_count_target()) {
1433                         vm_req_vmdaemon();
1434                         vm_pageout_req_swapout |= VM_SWAP_NORMAL;
1435                 }
1436 #endif
1437         }
1438
1439         /*
1440          * Handle catastrophic conditions.  Under good conditions we should
1441          * be at the target, well beyond our minimum.  If we could not even
1442          * reach our minimum the system is under heavy stress.
1443          *
1444          * Determine whether we have run out of memory.  This occurs when
1445          * swap_pager_full is TRUE and the only pages left in the page
1446          * queues are dirty.  We will still likely have page shortages.
1447          *
1448          * - swap_pager_full is set if insufficient swap was
1449          *   available to satisfy a requested pageout.
1450          *
1451          * - the inactive queue is bloated (4 x size of active queue),
1452          *   meaning the system is unable to get rid of dirty pages.
1453          *
1454          * - vm_page_count_min() without counting pages recycled from the
1455          *   active queue (recycle_count) means we could not recover
1456          *   enough pages to meet bare minimum needs.  This test only
1457          *   works if the inactive queue is bloated.
1458          *
1459          * - due to a positive inactive_shortage we shifted the remaining
1460          *   dirty pages from the active queue to the inactive queue
1461          *   trying to find clean ones to free.
1462          */
1463         if (swap_pager_full && vm_page_count_min(recycle_count))
1464                 kprintf("Warning: system low on memory+swap!\n");
1465         if (swap_pager_full && vm_page_count_min(recycle_count) &&
1466             vmstats.v_inactive_count > vmstats.v_active_count * 4 &&
1467             inactive_shortage > 0) {
1468                 /*
1469                  * Kill something.
1470                  */
1471                 info.bigproc = NULL;
1472                 info.bigsize = 0;
1473                 allproc_scan(vm_pageout_scan_callback, &info);
1474                 if (info.bigproc != NULL) {
1475                         killproc(info.bigproc, "out of swap space");
1476                         info.bigproc->p_nice = PRIO_MIN;
1477                         info.bigproc->p_usched->resetpriority(
1478                                 FIRST_LWP_IN_PROC(info.bigproc));
1479                         wakeup(&vmstats.v_free_count);
1480                         PRELE(info.bigproc);
1481                 }
1482         }
1483 }
1484
1485 /*
1486  * The caller must hold proc_token.
1487  */
1488 static int
1489 vm_pageout_scan_callback(struct proc *p, void *data)
1490 {
1491         struct vm_pageout_scan_info *info = data;
1492         vm_offset_t size;
1493
1494         /*
1495          * Never kill system processes or init.  If we have configured swap
1496          * then try to avoid killing low-numbered pids.
1497          */
1498         if ((p->p_flag & P_SYSTEM) || (p->p_pid == 1) ||
1499             ((p->p_pid < 48) && (vm_swap_size != 0))) {
1500                 return (0);
1501         }
1502
1503         /*
1504          * if the process is in a non-running type state,
1505          * don't touch it.
1506          */
1507         if (p->p_stat != SACTIVE && p->p_stat != SSTOP)
1508                 return (0);
1509
1510         /*
1511          * Get the approximate process size.  Note that anonymous pages
1512          * with backing swap will be counted twice, but there should not
1513          * be too many such pages due to the stress the VM system is
1514          * under at this point.
1515          */
1516         size = vmspace_anonymous_count(p->p_vmspace) +
1517                 vmspace_swap_count(p->p_vmspace);
1518
1519         /*
1520          * If this process is bigger than the biggest one seen so far,
1521          * remember it.
1522          */
1523         if (info->bigsize < size) {
1524                 if (info->bigproc)
1525                         PRELE(info->bigproc);
1526                 PHOLD(p);
1527                 info->bigproc = p;
1528                 info->bigsize = size;
1529         }
1530         lwkt_yield();
1531         return(0);
1532 }
1533
1534 /*
1535  * This routine tries to maintain the pseudo LRU active queue so
1536  * that some statistics accumulation still occurs during long
1537  * periods of time in which there is no paging.  This code helps
1538  * the situation where paging just starts to occur.
1539  */
1540 static void
1541 vm_pageout_page_stats(int q)
1542 {
1543         static int fullintervalcount = 0;
1544         struct vm_page marker;
1545         vm_page_t m;
1546         int pcount, tpcount;            /* Number of pages to check */
1547         int page_shortage;
1548
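        /*
         * Estimate how many pages we would still like to move out of
         * the active queue: the combined inactive, cache, and free
         * targets minus what those queues currently hold.
         */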
1549         page_shortage = (vmstats.v_inactive_target + vmstats.v_cache_max +
1550                          vmstats.v_free_min) -
1551                         (vmstats.v_free_count + vmstats.v_inactive_count +
1552                          vmstats.v_cache_count);
1553
1554         if (page_shortage <= 0)
1555                 return;
1556
1557         pcount = vmstats.v_active_count;
1558         fullintervalcount += vm_pageout_stats_interval;
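        /*
         * Between full scans only examine a pro-rated portion of the
         * active queue: vm_pageout_stats_max scaled by the fraction of
         * all pages which are currently active.  A full scan occurs
         * every vm_pageout_full_stats_interval seconds.
         */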
1559         if (fullintervalcount < vm_pageout_full_stats_interval) {
1560                 tpcount = (vm_pageout_stats_max * vmstats.v_active_count) /
1561                           vmstats.v_page_count;
1562                 if (pcount > tpcount)
1563                         pcount = tpcount;
1564         } else {
1565                 fullintervalcount = 0;
1566         }
1567
1568         bzero(&marker, sizeof(marker));
1569         marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
1570         marker.queue = PQ_ACTIVE + q;
1571         marker.pc = q;
1572         marker.wire_count = 1;
1573
1574         vm_page_queues_spin_lock(PQ_ACTIVE + q);
1575         TAILQ_INSERT_HEAD(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1576         vm_page_queues_spin_unlock(PQ_ACTIVE + q);
1577
1578         while ((m = TAILQ_NEXT(&marker, pageq)) != NULL &&
1579                pcount-- > 0)
1580         {
1581                 int actcount;
1582
1583                 vm_page_and_queue_spin_lock(m);
1584                 if (m != TAILQ_NEXT(&marker, pageq)) {
1585                         vm_page_and_queue_spin_unlock(m);
1586                         ++pcount;
1587                         continue;
1588                 }
1589                 KKASSERT(m->queue - m->pc == PQ_ACTIVE);
1590                 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1591                 TAILQ_INSERT_AFTER(&vm_page_queues[PQ_ACTIVE + q].pl, m,
1592                                    &marker, pageq);
1593
1594                 /*
1595                  * Ignore markers
1596                  */
1597                 if (m->flags & PG_MARKER) {
1598                         vm_page_and_queue_spin_unlock(m);
1599                         continue;
1600                 }
1601
1602                 /*
1603                  * Ignore pages we can't busy
1604                  */
1605                 if (vm_page_busy_try(m, TRUE)) {
1606                         vm_page_and_queue_spin_unlock(m);
1607                         continue;
1608                 }
1609                 vm_page_and_queue_spin_unlock(m);
1610                 KKASSERT(m->queue - m->pc == PQ_ACTIVE);
1611
1612                 /*
1613                  * We now have a safely busied page, the page and queue
1614                  * spinlocks have been released.
1615                  *
1616                  * Ignore held pages
1617                  */
1618                 if (m->hold_count) {
1619                         vm_page_wakeup(m);
1620                         continue;
1621                 }
1622
1623                 /*
1624                  * Calculate activity
1625                  */
1626                 actcount = 0;
1627                 if (m->flags & PG_REFERENCED) {
1628                         vm_page_flag_clear(m, PG_REFERENCED);
1629                         actcount += 1;
1630                 }
1631                 actcount += pmap_ts_referenced(m);
1632
1633                 /*
1634                  * Update act_count and move page to end of queue.
1635                  */
1636                 if (actcount) {
1637                         m->act_count += ACT_ADVANCE + actcount;
1638                         if (m->act_count > ACT_MAX)
1639                                 m->act_count = ACT_MAX;
1640                         vm_page_and_queue_spin_lock(m);
1641                         if (m->queue - m->pc == PQ_ACTIVE) {
1642                                 TAILQ_REMOVE(
1643                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1644                                         m, pageq);
1645                                 TAILQ_INSERT_TAIL(
1646                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1647                                         m, pageq);
1648                         }
1649                         vm_page_and_queue_spin_unlock(m);
1650                         vm_page_wakeup(m);
1651                         continue;
1652                 }
1653
1654                 if (m->act_count == 0) {
1655                         /*
1656                          * We turn off page access, so that we have
1657                          * more accurate RSS stats.  We don't do this
1658                          * in the normal page deactivation when the
1659                          * system is loaded VM wise, because the
1660                          * cost of the large number of page protect
1661                          * operations would be higher than the value
1662                          * of doing the operation.
1663                          *
1664                          * We use the marker to save our place so
1665                          * we can release the spin lock.  Both (m)
1666                          * and (next) will be invalid.
1667                          */
1668                         vm_page_protect(m, VM_PROT_NONE);
1669                         vm_page_deactivate(m);
1670                 } else {
1671                         m->act_count -= min(m->act_count, ACT_DECLINE);
1672                         vm_page_and_queue_spin_lock(m);
1673                         if (m->queue - m->pc == PQ_ACTIVE) {
1674                                 TAILQ_REMOVE(
1675                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1676                                         m, pageq);
1677                                 TAILQ_INSERT_TAIL(
1678                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1679                                         m, pageq);
1680                         }
1681                         vm_page_and_queue_spin_unlock(m);
1682                 }
1683                 vm_page_wakeup(m);
1684         }
1685
1686         /*
1687          * Remove our local marker
1688          */
1689         vm_page_queues_spin_lock(PQ_ACTIVE + q);
1690         TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1691         vm_page_queues_spin_unlock(PQ_ACTIVE + q);
1692 }
1693
1694 static int
1695 vm_pageout_free_page_calc(vm_size_t count)
1696 {
1697         if (count < vmstats.v_page_count)
1698                 return 0;
1699         /*
1700          * free_reserved needs to include enough for the largest swap pager
1701          * structures plus enough for any pv_entry structs when paging.
1702          *
1703          * v_free_min           normal allocations
1704          * v_free_reserved      system allocations
1705          * v_pageout_free_min   allocations by pageout daemon
1706          * v_interrupt_free_min low level allocations (e.g swap structures)
1707          */
1708         if (vmstats.v_page_count > 1024)
1709                 vmstats.v_free_min = 64 + (vmstats.v_page_count - 1024) / 200;
1710         else
1711                 vmstats.v_free_min = 64;
1712         vmstats.v_free_reserved = vmstats.v_free_min * 4 / 8 + 7;
1713         vmstats.v_free_severe = vmstats.v_free_min * 4 / 8 + 0;
1714         vmstats.v_pageout_free_min = vmstats.v_free_min * 2 / 8 + 7;
1715         vmstats.v_interrupt_free_min = vmstats.v_free_min * 1 / 8 + 7;
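        /*
         * Illustrative (hypothetical) numbers for a machine with 1GB of
         * RAM and 4KB pages (v_page_count = 262144):
         *
         *      v_free_min           = 64 + (262144 - 1024) / 200 = 1369
         *      v_free_reserved      = 1369 * 4 / 8 + 7           = 691
         *      v_free_severe        = 1369 * 4 / 8 + 0           = 684
         *      v_pageout_free_min   = 1369 * 2 / 8 + 7           = 349
         *      v_interrupt_free_min = 1369 * 1 / 8 + 7           = 178
         */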
1716
1717         return 1;
1718 }
1719
1720
1721 /*
1722  * vm_pageout is the high level pageout daemon.
1723  *
1724  * No requirements.
1725  */
1726 static void
1727 vm_pageout_thread(void)
1728 {
1729         int pass;
1730         int q;
1731
1732         /*
1733          * Initialize some paging parameters.
1734          */
1735         curthread->td_flags |= TDF_SYSTHREAD;
1736
1737         if (vmstats.v_page_count < 2000)
1738                 vm_pageout_page_count = 8;
1739
1740         vm_pageout_free_page_calc(vmstats.v_page_count);
1741
1742         /*
1743          * v_free_target and v_cache_min control pageout hysteresis.  Note
1744          * that these are more a measure of the VM cache queue hysteresis
1745          * than of the VM free queue.  Specifically, v_free_target is the
1746          * high water mark (free+cache pages).
1747          *
1748          * v_free_reserved + v_cache_min (mostly means v_cache_min) is the
1749          * low water mark, while v_free_min is the stop.  v_cache_min must
1750          * be big enough to handle memory needs while the pageout daemon
1751          * is signalled and run to free more pages.
1752          */
1753         if (vmstats.v_free_count > 6144)
1754                 vmstats.v_free_target = 4 * vmstats.v_free_min + vmstats.v_free_reserved;
1755         else
1756                 vmstats.v_free_target = 2 * vmstats.v_free_min + vmstats.v_free_reserved;
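        /*
         * Continuing the illustrative numbers from the free page
         * calculation above (v_free_min = 1369, v_free_reserved = 691),
         * the larger-memory case yields
         * v_free_target = 4 * 1369 + 691 = 6167 pages.
         */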
1757
1758         /*
1759          * NOTE: With the new buffer cache b_act_count we want the default
1760          *       inactive target to be a percentage of available memory.
1761          *
1762          *       The inactive target essentially determines the minimum
1763          *       number of 'temporary' pages capable of caching one-time-use
1764          *       files when the VM system is otherwise full of pages
1765          *       belonging to multi-time-use files or active program data.
1766          *
1767          * NOTE: The inactive target is aggressively pursued only if the
1768          *       inactive queue becomes too small.  If the inactive queue
1769          *       is large enough to satisfy page movement to free+cache
1770          *       then it is repopulated more slowly from the active queue.
1771          *       This allows a general inactive_target default to be set.
1772          *
1773          *       There is an issue here for processes which sit mostly idle
1774          *       'overnight', such as sshd, tcsh, and X.  Any movement from
1775          *       the active queue will eventually cause such pages to
1776          *       recycle, causing a lot of paging in the morning.  To
1777          *       reduce the incidence of this, pages cycled out of the
1778          *       buffer cache are moved directly to the inactive queue if
1779          *       they were only used once or twice.
1780          *
1781          *       The vfs.vm_cycle_point sysctl can be used to adjust this.
1782          *       Increasing the value (up to 64) increases the number of
1783          *       buffer recyclements which go directly to the inactive queue.
1784          */
1785         if (vmstats.v_free_count > 2048) {
1786                 vmstats.v_cache_min = vmstats.v_free_target;
1787                 vmstats.v_cache_max = 2 * vmstats.v_cache_min;
1788         } else {
1789                 vmstats.v_cache_min = 0;
1790                 vmstats.v_cache_max = 0;
1791         }
1792         vmstats.v_inactive_target = vmstats.v_free_count / 4;
1793
1794         /* XXX does not really belong here */
1795         if (vm_page_max_wired == 0)
1796                 vm_page_max_wired = vmstats.v_free_count / 3;
1797
1798         if (vm_pageout_stats_max == 0)
1799                 vm_pageout_stats_max = vmstats.v_free_target;
1800
1801         /*
1802          * Set interval in seconds for stats scan.
1803          */
1804         if (vm_pageout_stats_interval == 0)
1805                 vm_pageout_stats_interval = 5;
1806         if (vm_pageout_full_stats_interval == 0)
1807                 vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;
1808
1809
1810         /*
1811          * Set maximum free per pass
1812          */
1813         if (vm_pageout_stats_free_max == 0)
1814                 vm_pageout_stats_free_max = 5;
1815
1816         swap_pager_swap_init();
1817         pass = 0;
1818
1819         /*
1820          * The pageout daemon is never done, so loop forever.
1821          */
1822         while (TRUE) {
1823                 int error;
1824                 int delta1;
1825                 int delta2;
1826                 int inactive_shortage;
1827                 int active_shortage;
1828                 int vnodes_skipped = 0;
1829                 int recycle_count = 0;
1830                 int tmp;
1831
1832                 /*
1833                  * Wait for an action request.  If we time out, check to
1834                  * see if paging is needed (in case the normal wakeup
1835                  * code raced us).
1836                  */
1837                 if (vm_pages_needed == 0) {
1838                         error = tsleep(&vm_pages_needed,
1839                                        0, "psleep",
1840                                        vm_pageout_stats_interval * hz);
1841                         if (error &&
1842                             vm_paging_needed() == 0 &&
1843                             vm_pages_needed == 0) {
1844                                 for (q = 0; q < PQ_MAXL2_SIZE; ++q)
1845                                         vm_pageout_page_stats(q);
1846                                 continue;
1847                         }
1848                         vm_pages_needed = 1;
1849                 }
1850
1851                 mycpu->gd_cnt.v_pdwakeups++;
1852
1853                 /*
1854                  * Do whatever cleanup that the pmap code can.
1855                  */
1856                 pmap_collect();
1857
1858                 /*
1859                  * Scan for pageout.  Try to avoid thrashing the system
1860                  * with activity.
1861                  *
1862                  * Calculate our target for the number of free+cache pages we
1863                  * want to get to.  This is higher than the number that causes
1864                  * allocations to stall (severe) in order to provide hysteresis,
1865                  * and if we don't make it all the way but get to the minimum
1866                  * we're happy.
1867                  */
1868                 inactive_shortage = vm_paging_target() + vm_pageout_deficit;
1869                 vm_pageout_deficit = 0;
1870                 delta1 = 0;
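                /*
                 * The shortage is split roughly evenly across the
                 * PQ_MAXL2_SIZE sub-queues, rounding up so that each
                 * per-queue scan gets a non-zero target.
                 */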
1871                 for (q = 0; q < PQ_MAXL2_SIZE; ++q) {
1872                         delta1 += vm_pageout_scan_inactive(
1873                                         pass, q,
1874                                         inactive_shortage / PQ_MAXL2_SIZE + 1,
1875                                         &vnodes_skipped);
1876                 }
1877
1878                 /*
1879                  * Figure out how many active pages we must deactivate.  If
1880                  * we were able to reach our target with just the inactive
1881                  * scan above we limit the number of active pages we
1882                  * deactivate to reduce unnecessary work.
1883                  */
1884                 active_shortage = vmstats.v_inactive_target -
1885                                   vmstats.v_inactive_count;
1886
1887                 /*
1888                  * If we were unable to free sufficient inactive pages to
1889                  * satisfy the free/cache queue requirements then simply
1890                  * reaching the inactive target may not be good enough.
1891                  * Try to deactivate pages in excess of the target based
1892                  * on the shortfall.
1893                  *
1894                  * However, to prevent thrashing the VM system, do not
1895                  * deactivate more than an additional 1/10 the inactive
1896                  * target's worth of active pages.
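                 *
                 * For example (hypothetical numbers): if we fell 500 pages
                 * short (inactive_shortage - delta1 == 500) we request up
                 * to 1000 additional deactivations, clamped to
                 * v_inactive_target / 10.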
1897                  */
1898                 if (delta1 < inactive_shortage) {
1899                         tmp = (inactive_shortage - delta1) * 2;
1900                         if (tmp > vmstats.v_inactive_target / 10)
1901                                 tmp = vmstats.v_inactive_target / 10;
1902                         active_shortage += tmp;
1903                 }
1904
1905                 delta2 = 0;
1906                 for (q = 0; q < PQ_MAXL2_SIZE; ++q) {
1907                         delta2 += vm_pageout_scan_active(
1908                                         pass, q,
1909                                         inactive_shortage / PQ_MAXL2_SIZE + 1,
1910                                         active_shortage / PQ_MAXL2_SIZE + 1,
1911                                         &recycle_count);
1912                 }
1913
1914                 /*
1915                  * Finally free enough cache pages to meet our free page
1916                  * requirement and take more drastic measures if we are
1917                  * still in trouble.
1918                  */
1919                 inactive_shortage -= delta2;
1920                 vm_pageout_scan_cache(inactive_shortage, vnodes_skipped,
1921                                       recycle_count);
1922
1923                 /*
1924                  * Wait for more work.
1925                  */
1926                 if (inactive_shortage > 0) {
1927                         ++pass;
1928                         if (swap_pager_full) {
1929                                 /*
1930                                  * Running out of memory, catastrophic back-off
1931                                  * to one-second intervals.
1932                                  */
1933                                 tsleep(&vm_pages_needed, 0, "pdelay", hz);
1934                         } else if (pass < 10 && vm_pages_needed > 1) {
1935                                 /*
1936                                  * Normal operation, additional processes
1937                                  * have already kicked us.  Retry immediately.
1938                                  */
1939                         } else if (pass < 10) {
1940                                 /*
1941                                  * Normal operation, fewer processes.  Delay
1942                                  * a bit but allow wakeups.
1943                                  */
1944                                 vm_pages_needed = 0;
1945                                 tsleep(&vm_pages_needed, 0, "pdelay", hz / 10);
1946                                 vm_pages_needed = 1;
1947                         } else {
1948                                 /*
1949                                  * We've taken too many passes, forced delay.
1950                                  */
1951                                 tsleep(&vm_pages_needed, 0, "pdelay", hz / 10);
1952                         }
1953                 } else {
1954                         /*
1955                          * Interlocked wakeup of waiters (non-optional)
1956                          */
1957                         pass = 0;
1958                         if (vm_pages_needed && !vm_page_count_min(0)) {
1959                                 wakeup(&vmstats.v_free_count);
1960                                 vm_pages_needed = 0;
1961                         }
1962                 }
1963         }
1964 }
1965
1966 static struct kproc_desc page_kp = {
1967         "pagedaemon",
1968         vm_pageout_thread,
1969         &pagethread
1970 };
1971 SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)
1972
1973
1974 /*
1975  * Called after allocating a page out of the cache or free queue
1976  * to possibly wake the pagedaemon up to replenish our supply.
1977  *
1978  * We try to generate some hysteresis by waking the pagedaemon up
1979  * when our free+cache pages go below the free_min+cache_min level.
1980  * The pagedaemon tries to get the count back up to at least the
1981  * minimum, and through to the target level if possible.
1982  *
1983  * If the pagedaemon is already active bump vm_pages_needed as a hint
1984  * that there are even more requests pending.
1985  *
1986  * SMP races ok?
1987  * No requirements.
1988  */
1989 void
1990 pagedaemon_wakeup(void)
1991 {
1992         if (vm_paging_needed() && curthread != pagethread) {
1993                 if (vm_pages_needed == 0) {
1994                         vm_pages_needed = 1;    /* SMP race ok */
1995                         wakeup(&vm_pages_needed);
1996                 } else if (vm_page_count_min(0)) {
1997                         ++vm_pages_needed;      /* SMP race ok */
1998                 }
1999         }
2000 }
2001
2002 #if !defined(NO_SWAPPING)
2003
2004 /*
2005  * SMP races ok?
2006  * No requirements.
2007  */
2008 static void
2009 vm_req_vmdaemon(void)
2010 {
2011         static int lastrun = 0;
2012
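        /*
         * Wake the vmdaemon at most about once per second.  The second
         * test re-arms the wakeup after the ticks counter wraps.
         */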
2013         if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
2014                 wakeup(&vm_daemon_needed);
2015                 lastrun = ticks;
2016         }
2017 }
2018
2019 static int vm_daemon_callback(struct proc *p, void *data __unused);
2020
2021 /*
2022  * No requirements.
2023  */
2024 static void
2025 vm_daemon(void)
2026 {
2027         /*
2028          * XXX vm_daemon_needed specific token?
2029          */
2030         while (TRUE) {
2031                 tsleep(&vm_daemon_needed, 0, "psleep", 0);
2032                 if (vm_pageout_req_swapout) {
2033                         swapout_procs(vm_pageout_req_swapout);
2034                         vm_pageout_req_swapout = 0;
2035                 }
2036                 /*
2037                  * scan for processes which exceed their rlimits or which
2038                  * are swapped out -- deactivate their pages
2039                  */
2040                 allproc_scan(vm_daemon_callback, NULL);
2041         }
2042 }
2043
2044 /*
2045  * Caller must hold proc_token.
2046  */
2047 static int
2048 vm_daemon_callback(struct proc *p, void *data __unused)
2049 {
2050         vm_pindex_t limit, size;
2051
2052         /*
2053          * if this is a system process or the process is already
2054          * exiting, skip it.
2055          */
2056         if (p->p_flag & (P_SYSTEM | P_WEXIT))
2057                 return (0);
2058
2059         /*
2060          * if the process is in a non-running type state,
2061          * don't touch it.
2062          */
2063         if (p->p_stat != SACTIVE && p->p_stat != SSTOP)
2064                 return (0);
2065
2066         /*
2067          * get the RSS limit in pages (the rlimit is specified in bytes)
2068          */
2069         limit = OFF_TO_IDX(qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
2070                                 p->p_rlimit[RLIMIT_RSS].rlim_max));
2071
2072         /*
2073          * let processes that are swapped out really be
2074          * swapped out.  Set the limit to nothing to get as
2075          * many pages out to swap as possible.
2076          */
2077         if (p->p_flag & P_SWAPPEDOUT)
2078                 limit = 0;
2079
2080         lwkt_gettoken(&p->p_vmspace->vm_map.token);
2081         size = vmspace_resident_count(p->p_vmspace);
2082         if (limit >= 0 && size >= limit) {
2083                 vm_pageout_map_deactivate_pages(&p->p_vmspace->vm_map, limit);
2084         }
2085         lwkt_reltoken(&p->p_vmspace->vm_map.token);
2086         return (0);
2087 }
2088
2089 #endif