kernel - reformulate some of the pmap code to adhere to the new rules
sys/vm/vm_pageout.c
1 /*
2  * (MPSAFE)
3  *
4  * Copyright (c) 1991 Regents of the University of California.
5  * All rights reserved.
6  * Copyright (c) 1994 John S. Dyson
7  * All rights reserved.
8  * Copyright (c) 1994 David Greenman
9  * All rights reserved.
10  *
11  * This code is derived from software contributed to Berkeley by
12  * The Mach Operating System project at Carnegie-Mellon University.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions
16  * are met:
17  * 1. Redistributions of source code must retain the above copyright
18  *    notice, this list of conditions and the following disclaimer.
19  * 2. Redistributions in binary form must reproduce the above copyright
20  *    notice, this list of conditions and the following disclaimer in the
21  *    documentation and/or other materials provided with the distribution.
22  * 4. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  *      from: @(#)vm_pageout.c  7.4 (Berkeley) 5/7/91
39  *
40  *
41  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
42  * All rights reserved.
43  *
44  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
45  *
46  * Permission to use, copy, modify and distribute this software and
47  * its documentation is hereby granted, provided that both the copyright
48  * notice and this permission notice appear in all copies of the
49  * software, derivative works or modified versions, and any portions
50  * thereof, and that both notices appear in supporting documentation.
51  *
52  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
53  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
54  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
55  *
56  * Carnegie Mellon requests users of this software to return to
57  *
58  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
59  *  School of Computer Science
60  *  Carnegie Mellon University
61  *  Pittsburgh PA 15213-3890
62  *
63  * any improvements or extensions that they make and grant Carnegie the
64  * rights to redistribute these changes.
65  *
66  * $FreeBSD: src/sys/vm/vm_pageout.c,v 1.151.2.15 2002/12/29 18:21:04 dillon Exp $
67  */
68
69 /*
70  *      The proverbial page-out daemon.
71  */
72
73 #include "opt_vm.h"
74 #include <sys/param.h>
75 #include <sys/systm.h>
76 #include <sys/kernel.h>
77 #include <sys/proc.h>
78 #include <sys/kthread.h>
79 #include <sys/resourcevar.h>
80 #include <sys/signalvar.h>
81 #include <sys/vnode.h>
82 #include <sys/vmmeter.h>
83 #include <sys/sysctl.h>
84
85 #include <vm/vm.h>
86 #include <vm/vm_param.h>
87 #include <sys/lock.h>
88 #include <vm/vm_object.h>
89 #include <vm/vm_page.h>
90 #include <vm/vm_map.h>
91 #include <vm/vm_pageout.h>
92 #include <vm/vm_pager.h>
93 #include <vm/swap_pager.h>
94 #include <vm/vm_extern.h>
95
96 #include <sys/thread2.h>
97 #include <sys/spinlock2.h>
98 #include <vm/vm_page2.h>
99
100 /*
101  * System initialization
102  */
103
104 /* the kernel process "vm_pageout"*/
105 static int vm_pageout_clean (vm_page_t);
106 static int vm_pageout_free_page_calc (vm_size_t count);
107 struct thread *pagethread;
108
109 #if !defined(NO_SWAPPING)
110 /* the kernel process "vm_daemon"*/
111 static void vm_daemon (void);
112 static struct   thread *vmthread;
113
114 static struct kproc_desc vm_kp = {
115         "vmdaemon",
116         vm_daemon,
117         &vmthread
118 };
119 SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
120 #endif
121
122
123 int vm_pages_needed=0;          /* Event on which pageout daemon sleeps */
124 int vm_pageout_deficit=0;       /* Estimated number of pages deficit */
125 int vm_pageout_pages_needed=0;  /* flag saying that the pageout daemon needs pages */
126
127 #if !defined(NO_SWAPPING)
128 static int vm_pageout_req_swapout;      /* XXX */
129 static int vm_daemon_needed;
130 #endif
131 static int vm_max_launder = 32;
132 static int vm_pageout_stats_max=0, vm_pageout_stats_interval = 0;
133 static int vm_pageout_full_stats_interval = 0;
134 static int vm_pageout_stats_free_max=0, vm_pageout_algorithm=0;
135 static int defer_swap_pageouts=0;
136 static int disable_swap_pageouts=0;
137
138 #if defined(NO_SWAPPING)
139 static int vm_swap_enabled=0;
140 static int vm_swap_idle_enabled=0;
141 #else
142 static int vm_swap_enabled=1;
143 static int vm_swap_idle_enabled=0;
144 #endif
145
146 SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
147         CTLFLAG_RW, &vm_pageout_algorithm, 0, "LRU page mgmt");
148
149 SYSCTL_INT(_vm, OID_AUTO, max_launder,
150         CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");
151
152 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
153         CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");
154
155 SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
156         CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");
157
158 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
159         CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");
160
161 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
162         CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "Not implemented");
163
164 #if defined(NO_SWAPPING)
165 SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
166         CTLFLAG_RD, &vm_swap_enabled, 0, "");
167 SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
168         CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
169 #else
170 SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
171         CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
172 SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
173         CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
174 #endif
175
176 SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
177         CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");
178
179 SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
180         CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");
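/*
 * The knobs above are runtime-tunable via sysctl(8).  A minimal,
 * illustrative example (the values are arbitrary, not recommendations):
 *
 *      sysctl vm.max_launder=64
 *      sysctl vm.defer_swapspace_pageouts=1
 */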
181
182 static int pageout_lock_miss;
183 SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
184         CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");
185
186 int vm_load;
187 SYSCTL_INT(_vm, OID_AUTO, vm_load,
188         CTLFLAG_RD, &vm_load, 0, "load on the VM system");
189 int vm_load_enable = 1;
190 SYSCTL_INT(_vm, OID_AUTO, vm_load_enable,
191         CTLFLAG_RW, &vm_load_enable, 0, "enable vm_load rate limiting");
192 #ifdef INVARIANTS
193 int vm_load_debug;
194 SYSCTL_INT(_vm, OID_AUTO, vm_load_debug,
195         CTLFLAG_RW, &vm_load_debug, 0, "debug vm_load");
196 #endif
197
198 #define VM_PAGEOUT_PAGE_COUNT 16
199 int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;
200
201 int vm_page_max_wired;          /* XXX max # of wired pages system-wide */
202
203 #if !defined(NO_SWAPPING)
204 typedef void freeer_fcn_t (vm_map_t, vm_object_t, vm_pindex_t, int);
205 static void vm_pageout_map_deactivate_pages (vm_map_t, vm_pindex_t);
206 static freeer_fcn_t vm_pageout_object_deactivate_pages;
207 static void vm_req_vmdaemon (void);
208 #endif
209 static void vm_pageout_page_stats(int q);
210
211 /*
212  * Update vm_load to slow down faulting processes.
213  *
214  * SMP races ok.
215  * No requirements.
216  */
217 void
218 vm_fault_ratecheck(void)
219 {
220         if (vm_pages_needed) {
221                 if (vm_load < 1000)
222                         ++vm_load;
223         } else {
224                 if (vm_load > 0)
225                         --vm_load;
226         }
227 }
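
/*
 * vm_load is consumed elsewhere (when vm_load_enable is set) as a 0..1000
 * pressure indicator used to throttle faulting processes.  A minimal sketch
 * of such a consumer is shown below; the threshold and sleep duration are
 * hypothetical and do not reflect the actual fault-path code:
 *
 *      if (vm_load_enable && vm_load > 500)
 *              tsleep(&vm_load, 0, "vmload", (vm_load * hz) / 10000);
 */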
228
229 /*
230  * vm_pageout_clean:
231  *
232  * Clean the page and remove it from the laundry.  The page must be
233  * busied by the caller and will be unbusied by the flush path.
234  *
235  * Holding the page busy causes potential page faults on this page to
236  * block while we form the flush cluster and initiate the I/O; nothing
237  * else can mess with the page in the meantime.
238  */
239 static int
240 vm_pageout_clean(vm_page_t m)
241 {
242         vm_object_t object;
243         vm_page_t mc[2*vm_pageout_page_count];
244         int pageout_count;
245         int error;
246         int ib, is, page_base;
247         vm_pindex_t pindex = m->pindex;
248
249         object = m->object;
250
251         /*
252          * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
253          * with the new swapper, but we could have serious problems paging
254          * out other object types if there is insufficient memory.  
255          *
256          * Unfortunately, checking free memory here is far too late, so the
257          * check has been moved up a procedural level.
258          */
259
260         /*
261          * Don't mess with the page if it's busy, held, or special
262          *
263          * XXX do we really need to check hold_count here?  hold_count
264          * isn't supposed to mess with vm_page ops except prevent the
265          * page from being reused.
266          */
267         if (m->hold_count != 0 || (m->flags & PG_UNMANAGED)) {
268                 vm_page_wakeup(m);
269                 return 0;
270         }
271
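        /*
         * Seed the cluster array.  (m) is placed at the midpoint of mc[],
         * which is sized 2*vm_pageout_page_count, so the reverse scan (ib)
         * can grow the cluster downward and the forward scan (is) can grow
         * it upward without shifting entries.
         */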
272         mc[vm_pageout_page_count] = m;
273         pageout_count = 1;
274         page_base = vm_pageout_page_count;
275         ib = 1;
276         is = 1;
277
278         /*
279          * Scan object for clusterable pages.
280          *
281          * We can cluster ONLY if: ->> the page is NOT
282          * clean, wired, busy, held, or mapped into a
283          * buffer, and one of the following:
284          * 1) The page is inactive, or a seldom used
285          *    active page.
286          * -or-
287          * 2) we force the issue.
288          *
289          * During heavy mmap/modification loads the pageout
290          * daemon can really fragment the underlying file
291          * due to flushing pages out of order and not trying to
292          * align the clusters (which leaves sporadic out-of-order
293          * holes).  To solve this problem we do the reverse scan
294          * first and attempt to align our cluster, then do a 
295          * forward scan if room remains.
296          */
297
298         vm_object_hold(object);
299 more:
300         while (ib && pageout_count < vm_pageout_page_count) {
301                 vm_page_t p;
302
303                 if (ib > pindex) {
304                         ib = 0;
305                         break;
306                 }
307
308                 p = vm_page_lookup_busy_try(object, pindex - ib, TRUE, &error);
309                 if (error || p == NULL) {
310                         ib = 0;
311                         break;
312                 }
313                 if ((p->queue - p->pc) == PQ_CACHE ||
314                     (p->flags & PG_UNMANAGED)) {
315                         vm_page_wakeup(p);
316                         ib = 0;
317                         break;
318                 }
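                /*
                 * vm_page_test_dirty() refreshes the dirty bits from the
                 * pmap.  (p->dirty & p->valid) == 0 means no valid portion
                 * of the page is dirty, so there is nothing to flush and
                 * the page cannot join the cluster.
                 */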
319                 vm_page_test_dirty(p);
320                 if ((p->dirty & p->valid) == 0 ||
321                     p->queue - p->pc != PQ_INACTIVE ||
322                     p->wire_count != 0 ||       /* may be held by buf cache */
323                     p->hold_count != 0) {       /* may be undergoing I/O */
324                         vm_page_wakeup(p);
325                         ib = 0;
326                         break;
327                 }
328                 mc[--page_base] = p;
329                 ++pageout_count;
330                 ++ib;
331                 /*
332                  * Alignment boundary: stop here and switch directions.  Do
333                  * not clear ib.
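                 * For example, with vm_pageout_page_count = 16 the reverse
                 * scan stops once the lowest page added sits on a pindex
                 * that is a multiple of 16, aligning the flush cluster.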
334                  */
335                 if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
336                         break;
337         }
338
339         while (pageout_count < vm_pageout_page_count && 
340             pindex + is < object->size) {
341                 vm_page_t p;
342
343                 p = vm_page_lookup_busy_try(object, pindex + is, TRUE, &error);
344                 if (error || p == NULL)
345                         break;
346                 if (((p->queue - p->pc) == PQ_CACHE) ||
347                     (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
348                         vm_page_wakeup(p);
349                         break;
350                 }
351                 vm_page_test_dirty(p);
352                 if ((p->dirty & p->valid) == 0 ||
353                     p->queue - p->pc != PQ_INACTIVE ||
354                     p->wire_count != 0 ||       /* may be held by buf cache */
355                     p->hold_count != 0) {       /* may be undergoing I/O */
356                         vm_page_wakeup(p);
357                         break;
358                 }
359                 mc[page_base + pageout_count] = p;
360                 ++pageout_count;
361                 ++is;
362         }
363
364         /*
365          * If we exhausted our forward scan, continue with the reverse scan
366          * when possible, even past a page boundary.  This catches boundary
367          * conditions.
368          */
369         if (ib && pageout_count < vm_pageout_page_count)
370                 goto more;
371
372         vm_object_drop(object);
373
374         /*
375          * we allow reads during pageouts...
376          */
377         return vm_pageout_flush(&mc[page_base], pageout_count, 0);
378 }
379
380 /*
381  * vm_pageout_flush() - launder the given pages
382  *
383  *      The given pages are laundered.  Note that we set up for the start of
384  *      I/O (i.e. busy the page), mark it read-only, and bump the object
385  *      reference count all in here rather than in the parent.  If we want
386  *      the parent to do more sophisticated things we may have to change
387  *      the ordering.
388  *
389  *      The pages in the array must be busied by the caller and will be
390  *      unbusied by this function.
391  */
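/*
 * Hypothetical usage sketch (illustration only; vm_pageout_clean() above
 * is the real caller).  The pages must be busied and fully valid:
 *
 *      vm_page_t mc[2];
 *
 *      ... busy two fully-valid dirty pages from one object into mc[] ...
 *      n = vm_pageout_flush(mc, 2, 0);
 *      ... n pages were written or queued; all pages are now unbusied ...
 */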
392 int
393 vm_pageout_flush(vm_page_t *mc, int count, int flags)
394 {
395         vm_object_t object;
396         int pageout_status[count];
397         int numpagedout = 0;
398         int i;
399
400         /*
401          * Initiate I/O.  Bump the vm_page_t->busy counter.
402          */
403         for (i = 0; i < count; i++) {
404                 KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
405                         ("vm_pageout_flush page %p index %d/%d: partially "
406                          "invalid page", mc[i], i, count));
407                 vm_page_io_start(mc[i]);
408         }
409
410         /*
411          * We must make the pages read-only.  This will also force the
412          * modified bit in the related pmaps to be cleared.  The pager
413          * cannot clear the bit for us since the I/O completion code
414          * typically runs from an interrupt.  The act of making the page
415          * read-only handles the case for us.
416          *
417          * Then we can unbusy the pages; we still hold a reference by virtue
418          * of our soft-busy.
419          */
420         for (i = 0; i < count; i++) {
421                 vm_page_protect(mc[i], VM_PROT_READ);
422                 vm_page_wakeup(mc[i]);
423         }
424
425         object = mc[0]->object;
426         vm_object_pip_add(object, count);
427
428         vm_pager_put_pages(object, mc, count,
429             (flags | ((object == &kernel_object) ? VM_PAGER_PUT_SYNC : 0)),
430             pageout_status);
431
432         for (i = 0; i < count; i++) {
433                 vm_page_t mt = mc[i];
434
435                 switch (pageout_status[i]) {
436                 case VM_PAGER_OK:
437                         numpagedout++;
438                         break;
439                 case VM_PAGER_PEND:
440                         numpagedout++;
441                         break;
442                 case VM_PAGER_BAD:
443                         /*
444                          * Page outside of range of object. Right now we
445                          * essentially lose the changes by pretending it
446                          * worked.
447                          */
448                         vm_page_busy_wait(mt, FALSE, "pgbad");
449                         pmap_clear_modify(mt);
450                         vm_page_undirty(mt);
451                         vm_page_wakeup(mt);
452                         break;
453                 case VM_PAGER_ERROR:
454                 case VM_PAGER_FAIL:
455                         /*
456                          * A page typically cannot be paged out when we
457                          * have run out of swap.  We leave the page
458                          * marked inactive and will try to page it out
459                          * again later.
460                          *
461                          * Starvation of the active page list is used to
462                          * determine when the system is massively memory
463                          * starved.
464                          */
465                         break;
466                 case VM_PAGER_AGAIN:
467                         break;
468                 }
469
470                 /*
471                  * If the operation is still going, leave the page busy to
472                  * block all other accesses. Also, leave the paging in
473                  * progress indicator set so that we don't attempt an object
474                  * collapse.
475                  *
476                  * For any pages which have completed synchronously, 
477                  * deactivate the page if we are under a severe deficit.
478                  * Do not try to enter them into the cache, though; they
479                  * might still be read-heavy.
480                  */
481                 if (pageout_status[i] != VM_PAGER_PEND) {
482                         vm_page_busy_wait(mt, FALSE, "pgouw");
483                         if (vm_page_count_severe())
484                                 vm_page_deactivate(mt);
485 #if 0
486                         if (!vm_page_count_severe() || !vm_page_try_to_cache(mt))
487                                 vm_page_protect(mt, VM_PROT_READ);
488 #endif
489                         vm_page_io_finish(mt);
490                         vm_page_wakeup(mt);
491                         vm_object_pip_wakeup(object);
492                 }
493         }
494         return numpagedout;
495 }
496
497 #if !defined(NO_SWAPPING)
498 /*
499  * Deactivate enough pages to satisfy the inactive target
500  * requirements or, if vm_page_proc_limit is set,
501  * deactivate all of the pages in the object and its
502  * backing_objects.
503  *
504  * The map must be locked.
505  * The caller must hold the vm_object.
506  */
507 static int vm_pageout_object_deactivate_pages_callback(vm_page_t, void *);
508
509 static void
510 vm_pageout_object_deactivate_pages(vm_map_t map, vm_object_t object,
511                                    vm_pindex_t desired, int map_remove_only)
512 {
513         struct rb_vm_page_scan_info info;
514         vm_object_t lobject;
515         vm_object_t tobject;
516         int remove_mode;
517
518         ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
519         lobject = object;
520
521         while (lobject) {
522                 if (pmap_resident_count(vm_map_pmap(map)) <= desired)
523                         break;
524                 if (lobject->type == OBJT_DEVICE || lobject->type == OBJT_PHYS)
525                         break;
526                 if (lobject->paging_in_progress)
527                         break;
528
529                 remove_mode = map_remove_only;
530                 if (lobject->shadow_count > 1)
531                         remove_mode = 1;
532
533                 /*
534                  * Scan the object's entire memory queue.  We hold the
535                  * object's token so the scan should not race anything.
536                  */
537                 info.limit = remove_mode;
538                 info.map = map;
539                 info.desired = desired;
540                 vm_page_rb_tree_RB_SCAN(&lobject->rb_memq, NULL,
541                                 vm_pageout_object_deactivate_pages_callback,
542                                 &info
543                 );
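                /*
                 * Traverse to the backing object, if any.  vm_object_hold()
                 * can block, so re-check that the backing_object pointer did
                 * not change while we slept; if it did, drop the hold and
                 * retry.
                 */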
544                 while ((tobject = lobject->backing_object) != NULL) {
545                         KKASSERT(tobject != object);
546                         vm_object_hold(tobject);
547                         if (tobject == lobject->backing_object)
548                                 break;
549                         vm_object_drop(tobject);
550                 }
551                 if (lobject != object) {
552                         vm_object_lock_swap();
553                         vm_object_drop(lobject);
554                 }
555                 lobject = tobject;
556         }
557         if (lobject != object)
558                 vm_object_drop(lobject);
559 }
560
561 /*
562  * The caller must hold the vm_object.
563  */
564 static int
565 vm_pageout_object_deactivate_pages_callback(vm_page_t p, void *data)
566 {
567         struct rb_vm_page_scan_info *info = data;
568         int actcount;
569
570         if (pmap_resident_count(vm_map_pmap(info->map)) <= info->desired) {
571                 return(-1);
572         }
573         mycpu->gd_cnt.v_pdpages++;
574
575         if (vm_page_busy_try(p, TRUE))
576                 return(0);
577         if (p->wire_count || p->hold_count || (p->flags & PG_UNMANAGED)) {
578                 vm_page_wakeup(p);
579                 return(0);
580         }
581         if (!pmap_page_exists_quick(vm_map_pmap(info->map), p)) {
582                 vm_page_wakeup(p);
583                 return(0);
584         }
585
586         actcount = pmap_ts_referenced(p);
587         if (actcount) {
588                 vm_page_flag_set(p, PG_REFERENCED);
589         } else if (p->flags & PG_REFERENCED) {
590                 actcount = 1;
591         }
592
593         vm_page_and_queue_spin_lock(p);
594         if (p->queue - p->pc != PQ_ACTIVE && (p->flags & PG_REFERENCED)) {
595                 vm_page_and_queue_spin_unlock(p);
596                 vm_page_activate(p);
597                 p->act_count += actcount;
598                 vm_page_flag_clear(p, PG_REFERENCED);
599         } else if (p->queue - p->pc == PQ_ACTIVE) {
600                 if ((p->flags & PG_REFERENCED) == 0) {
601                         p->act_count -= min(p->act_count, ACT_DECLINE);
602                         if (!info->limit &&
603                             (vm_pageout_algorithm || (p->act_count == 0))) {
604                                 vm_page_and_queue_spin_unlock(p);
605                                 vm_page_protect(p, VM_PROT_NONE);
606                                 vm_page_deactivate(p);
607                         } else {
608                                 TAILQ_REMOVE(&vm_page_queues[p->queue].pl,
609                                              p, pageq);
610                                 TAILQ_INSERT_TAIL(&vm_page_queues[p->queue].pl,
611                                                   p, pageq);
612                                 vm_page_and_queue_spin_unlock(p);
613                         }
614                 } else {
615                         vm_page_and_queue_spin_unlock(p);
616                         vm_page_activate(p);
617                         vm_page_flag_clear(p, PG_REFERENCED);
618
619                         vm_page_and_queue_spin_lock(p);
620                         if (p->queue - p->pc == PQ_ACTIVE) {
621                                 if (p->act_count < (ACT_MAX - ACT_ADVANCE))
622                                         p->act_count += ACT_ADVANCE;
623                                 TAILQ_REMOVE(&vm_page_queues[p->queue].pl,
624                                              p, pageq);
625                                 TAILQ_INSERT_TAIL(&vm_page_queues[p->queue].pl,
626                                                   p, pageq);
627                         }
628                         vm_page_and_queue_spin_unlock(p);
629                 }
630         } else if (p->queue - p->pc == PQ_INACTIVE) {
631                 vm_page_and_queue_spin_unlock(p);
632                 vm_page_protect(p, VM_PROT_NONE);
633         } else {
634                 vm_page_and_queue_spin_unlock(p);
635         }
636         vm_page_wakeup(p);
637         return(0);
638 }
639
640 /*
641  * Deactivate some number of pages in a map; try to do it fairly, but
642  * that is really hard to do.
643  */
644 static void
645 vm_pageout_map_deactivate_pages(vm_map_t map, vm_pindex_t desired)
646 {
647         vm_map_entry_t tmpe;
648         vm_object_t obj, bigobj;
649         int nothingwired;
650
651         if (lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT)) {
652                 return;
653         }
654
655         bigobj = NULL;
656         nothingwired = TRUE;
657
658         /*
659          * first, search out the biggest object, and try to free pages from
660          * that.
661          */
662         tmpe = map->header.next;
663         while (tmpe != &map->header) {
664                 switch(tmpe->maptype) {
665                 case VM_MAPTYPE_NORMAL:
666                 case VM_MAPTYPE_VPAGETABLE:
667                         obj = tmpe->object.vm_object;
668                         if ((obj != NULL) && (obj->shadow_count <= 1) &&
669                                 ((bigobj == NULL) ||
670                                  (bigobj->resident_page_count < obj->resident_page_count))) {
671                                 bigobj = obj;
672                         }
673                         break;
674                 default:
675                         break;
676                 }
677                 if (tmpe->wired_count > 0)
678                         nothingwired = FALSE;
679                 tmpe = tmpe->next;
680         }
681
682         if (bigobj)  {
683                 vm_object_hold(bigobj);
684                 vm_pageout_object_deactivate_pages(map, bigobj, desired, 0);
685                 vm_object_drop(bigobj);
686         }
687
688         /*
689          * Next, hunt around for other pages to deactivate.  We actually
690          * do this search sort of wrong -- .text first is not the best idea.
691          */
692         tmpe = map->header.next;
693         while (tmpe != &map->header) {
694                 if (pmap_resident_count(vm_map_pmap(map)) <= desired)
695                         break;
696                 switch(tmpe->maptype) {
697                 case VM_MAPTYPE_NORMAL:
698                 case VM_MAPTYPE_VPAGETABLE:
699                         obj = tmpe->object.vm_object;
700                         if (obj) {
701                                 vm_object_hold(obj);
702                                 vm_pageout_object_deactivate_pages(map, obj, desired, 0);
703                                 vm_object_drop(obj);
704                         }
705                         break;
706                 default:
707                         break;
708                 }
709                 tmpe = tmpe->next;
710         }
711
712         /*
713          * Remove all mappings if a process is swapped out; this will free the
714          * page table pages.
715          */
716         if (desired == 0 && nothingwired)
717                 pmap_remove(vm_map_pmap(map),
718                             VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
719         vm_map_unlock(map);
720 }
721 #endif
722
723 /*
724  * Called when the pageout scan wants to free a page.  We no longer
725  * try to cycle the vm_object here with a reference & dealloc, which can
726  * cause a non-trivial object collapse in a critical path.
727  *
728  * It is unclear why we cycled the ref_count in the past, perhaps to try
729  * to optimize shadow chain collapses but I don't quite see why it would
730  * be necessary.  An OBJ_DEAD object should terminate any and all vm_pages
731  * synchronously and not have to be kick-started.
732  */
733 static void
734 vm_pageout_page_free(vm_page_t m) 
735 {
736         vm_page_protect(m, VM_PROT_NONE);
737         vm_page_free(m);
738 }
739
740 /*
741  * vm_pageout_scan does the dirty work for the pageout daemon.
742  */
743 struct vm_pageout_scan_info {
744         struct proc *bigproc;
745         vm_offset_t bigsize;
746 };
747
748 static int vm_pageout_scan_callback(struct proc *p, void *data);
749
750 static int
751 vm_pageout_scan_inactive(int pass, int q, int inactive_shortage,
752                          int *vnodes_skippedp)
753 {
754         vm_page_t m;
755         struct vm_page marker;
756         struct vnode *vpfailed;         /* warning, allowed to be stale */
757         int maxscan;
758         int delta = 0;
759         vm_object_t object;
760         int actcount;
761         int maxlaunder;
762
763         /*
764          * Start scanning the inactive queue for pages we can move to the
765          * cache or free.  The scan will stop when the target is reached or
766          * we have scanned the entire inactive queue.  Note that m->act_count
767          * is not used to form decisions for the inactive queue, only for the
768          * active queue.
769          *
770          * maxlaunder limits the number of dirty pages we flush per scan.
771          * For most systems a smaller value (16 or 32) is more robust under
772          * extreme memory and disk pressure because any unnecessary writes
773          * to disk can result in extreme performance degradation.  However,
774          * systems with excessive dirty pages (especially when MAP_NOSYNC is
775          * used) will die horribly with limited laundering.  If the pageout
776          * daemon cannot clean enough pages in the first pass, we let it go
777          * all out in succeeding passes.
778          */
779         if ((maxlaunder = vm_max_launder) <= 1)
780                 maxlaunder = 1;
781         if (pass)
782                 maxlaunder = 10000;
783
784         /*
785          * Initialize our marker
786          */
787         bzero(&marker, sizeof(marker));
788         marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
789         marker.queue = PQ_INACTIVE + q;
790         marker.pc = q;
791         marker.wire_count = 1;
792
793         /*
794          * Inactive queue scan.
795          *
796          * NOTE: The vm_page must be spinlocked before the queue to avoid
797          *       deadlocks, so it is easiest to simply iterate the loop
798          *       with the queue unlocked at the top.
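         *
         * NOTE: The marker is a dummy page linked into the queue.  It is
         *       moved past each page we examine, so the scan position is
         *       preserved even though the queue spinlock is dropped while
         *       the page is processed.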
799          */
800         vpfailed = NULL;
801
802         vm_page_queues_spin_lock(PQ_INACTIVE + q);
803         TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE + q].pl, &marker, pageq);
804         maxscan = vmstats.v_inactive_count;
805         vm_page_queues_spin_unlock(PQ_INACTIVE + q);
806
807         while ((m = TAILQ_NEXT(&marker, pageq)) != NULL &&
808                maxscan-- > 0 && inactive_shortage - delta > 0)
809         {
810                 vm_page_and_queue_spin_lock(m);
811                 if (m != TAILQ_NEXT(&marker, pageq)) {
812                         vm_page_and_queue_spin_unlock(m);
813                         ++maxscan;
814                         continue;
815                 }
816                 KKASSERT(m->queue - m->pc == PQ_INACTIVE);
817                 TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE + q].pl,
818                              &marker, pageq);
819                 TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE + q].pl, m,
820                                    &marker, pageq);
821                 mycpu->gd_cnt.v_pdpages++;
822
823                 /*
824                  * Skip marker pages
825                  */
826                 if (m->flags & PG_MARKER) {
827                         vm_page_and_queue_spin_unlock(m);
828                         continue;
829                 }
830
831                 /*
832                  * Try to busy the page.  Don't mess with pages which are
833                  * already busy or reorder them in the queue.
834                  */
835                 if (vm_page_busy_try(m, TRUE)) {
836                         vm_page_and_queue_spin_unlock(m);
837                         continue;
838                 }
839                 vm_page_and_queue_spin_unlock(m);
840                 KKASSERT(m->queue - m->pc == PQ_INACTIVE);
841
842                 lwkt_yield();
843
844                 /*
845                  * The page has been successfully busied and is now no
846                  * longer spinlocked.  The queue is no longer spinlocked
847                  * either.
848                  */
849
850                 /*
851                  * It is possible for a page to be busied ad-hoc (e.g. the
852                  * pmap_collect() code) and wired and race against the
853                  * allocation of a new page.  vm_page_alloc() may be forced
854                  * to deactivate the wired page in which case it winds up
855                  * on the inactive queue and must be handled here.  We
856                  * correct the problem simply by unqueuing the page.
857                  */
858                 if (m->wire_count) {
859                         vm_page_unqueue_nowakeup(m);
860                         vm_page_wakeup(m);
861                         kprintf("WARNING: pagedaemon: wired page on "
862                                 "inactive queue %p\n", m);
863                         continue;
864                 }
865
866                 /*
867                  * A held page may be undergoing I/O, so skip it.
868                  */
869                 if (m->hold_count) {
870                         vm_page_and_queue_spin_lock(m);
871                         if (m->queue - m->pc == PQ_INACTIVE) {
872                                 TAILQ_REMOVE(
873                                         &vm_page_queues[PQ_INACTIVE + q].pl,
874                                         m, pageq);
875                                 TAILQ_INSERT_TAIL(
876                                         &vm_page_queues[PQ_INACTIVE + q].pl,
877                                         m, pageq);
878                         }
879                         vm_page_and_queue_spin_unlock(m);
880                         ++vm_swapcache_inactive_heuristic;
881                         vm_page_wakeup(m);
882                         continue;
883                 }
884
885                 if (m->object->ref_count == 0) {
886                         /*
887                          * If the object is not being used, we ignore previous 
888                          * references.
889                          */
890                         vm_page_flag_clear(m, PG_REFERENCED);
891                         pmap_clear_reference(m);
892                         /* fall through to end */
893                 } else if (((m->flags & PG_REFERENCED) == 0) &&
894                             (actcount = pmap_ts_referenced(m))) {
895                         /*
896                          * Otherwise, if the page has been referenced while 
897                          * in the inactive queue, we bump the "activation
898                          * count" upwards, making it less likely that the
899                          * page will be added back to the inactive queue
900                          * prematurely again.  Here we check the page tables
901                          * (or emulated bits, if any), since the upper level
902                          * VM system does not know anything about existing
903                          * references.
904                          */
905                         vm_page_activate(m);
906                         m->act_count += (actcount + ACT_ADVANCE);
907                         vm_page_wakeup(m);
908                         continue;
909                 }
910
911                 /*
912                  * (m) is still busied.
913                  *
914                  * If the upper level VM system knows about any page 
915                  * references, we activate the page.  We also set the
916                  * "activation count" higher than normal so that we are less
917                  * likely to place pages back onto the inactive queue again.
918                  */
919                 if ((m->flags & PG_REFERENCED) != 0) {
920                         vm_page_flag_clear(m, PG_REFERENCED);
921                         actcount = pmap_ts_referenced(m);
922                         vm_page_activate(m);
923                         m->act_count += (actcount + ACT_ADVANCE + 1);
924                         vm_page_wakeup(m);
925                         continue;
926                 }
927
928                 /*
929                  * If the upper level VM system doesn't know anything about 
930                  * the page being dirty, we have to check for it again.  As 
931                  * far as the VM code knows, any partially dirty pages are 
932                  * fully dirty.
933                  *
934                  * Pages marked PG_WRITEABLE may be mapped into the user
935                  * address space of a process running on another cpu.  A
936                  * user process (without holding the MP lock) running on
937                  * another cpu may be able to touch the page while we are
938                  * trying to remove it.  vm_page_cache() will handle this
939                  * case for us.
940                  */
941                 if (m->dirty == 0) {
942                         vm_page_test_dirty(m);
943                 } else {
944                         vm_page_dirty(m);
945                 }
946
947                 if (m->valid == 0) {
948                         /*
949                          * Invalid pages can be easily freed
950                          */
951                         vm_pageout_page_free(m);
952                         mycpu->gd_cnt.v_dfree++;
953                         ++delta;
954                 } else if (m->dirty == 0) {
955                         /*
956                          * Clean pages can be placed onto the cache queue.
957                          * This effectively frees them.
958                          */
959                         vm_page_cache(m);
960                         ++delta;
961                 } else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) {
962                         /*
963                          * Dirty pages need to be paged out, but flushing
964                          * a page is extremely expensive versus freeing
965                          * a clean page.  Rather than artificially limiting
966                          * the number of pages we can flush, we instead give
967                          * dirty pages extra priority on the inactive queue
968                          * by forcing them to be cycled through the queue
969                          * twice before being flushed, after which the 
970                          * (now clean) page will cycle through once more
971                          * before being freed.  This significantly extends
972                          * the thrash point for a heavily loaded machine.
973                          */
974                         vm_page_flag_set(m, PG_WINATCFLS);
975                         vm_page_and_queue_spin_lock(m);
976                         if (m->queue - m->pc == PQ_INACTIVE) {
977                                 TAILQ_REMOVE(
978                                         &vm_page_queues[PQ_INACTIVE + q].pl,
979                                         m, pageq);
980                                 TAILQ_INSERT_TAIL(
981                                         &vm_page_queues[PQ_INACTIVE + q].pl,
982                                         m, pageq);
983                         }
984                         vm_page_and_queue_spin_unlock(m);
985                         ++vm_swapcache_inactive_heuristic;
986                         vm_page_wakeup(m);
987                 } else if (maxlaunder > 0) {
988                         /*
989                          * We always want to try to flush some dirty pages if
990                          * we encounter them, to keep the system stable.
991                          * Normally this number is small, but under extreme
992                          * pressure where there are insufficient clean pages
993                          * on the inactive queue, we may have to go all out.
994                          */
995                         int swap_pageouts_ok;
996                         struct vnode *vp = NULL;
997
998                         object = m->object;
999
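                        /*
                         * Decide whether paging to swap is currently allowed.
                         * Non-swap-backed objects can always be paged.  Swap-
                         * backed pageouts honor the defer/disable sysctls,
                         * but a deferral is overridden when free memory is
                         * critically low.
                         */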
1000                         if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
1001                                 swap_pageouts_ok = 1;
1002                         } else {
1003                                 swap_pageouts_ok =
1004                                     !(defer_swap_pageouts || disable_swap_pageouts);
1005                                 swap_pageouts_ok |= (!disable_swap_pageouts &&
1006                                     defer_swap_pageouts && vm_page_count_min(0));
1007                         }
1008
1009                         /*
1010                          * We don't bother paging objects that are "dead".  
1011                          * Those objects are in a "rundown" state.
1012                          */
1013                         if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
1014                                 vm_page_and_queue_spin_lock(m);
1015                                 if (m->queue - m->pc == PQ_INACTIVE) {
1016                                         TAILQ_REMOVE(
1017                                             &vm_page_queues[PQ_INACTIVE + q].pl,
1018                                             m, pageq);
1019                                         TAILQ_INSERT_TAIL(
1020                                             &vm_page_queues[PQ_INACTIVE + q].pl,
1021                                             m, pageq);
1022                                 }
1023                                 vm_page_and_queue_spin_unlock(m);
1024                                 ++vm_swapcache_inactive_heuristic;
1025                                 vm_page_wakeup(m);
1026                                 continue;
1027                         }
1028
1029                         /*
1030                          * (m) is still busied.
1031                          *
1032                          * The object is already known NOT to be dead.   It
1033                          * is possible for the vget() to block the whole
1034                          * pageout daemon, but the new low-memory handling
1035                          * code should prevent it.
1036                          *
1037                          * The previous code skipped locked vnodes and, worse,
1038                          * reordered pages in the queue.  This results in
1039                          * completely non-deterministic operation because,
1040                          * quite often, a vm_fault has initiated an I/O and
1041                          * is holding a locked vnode at just the point where
1042                          * the pageout daemon is woken up.
1043                          *
1044                          * We can't wait forever for the vnode lock, we might
1045                          * deadlock due to a vn_read() getting stuck in
1046                          * vm_wait while holding this vnode.  We skip the 
1047                          * vnode if we can't get it in a reasonable amount
1048                          * of time.
1049                          *
1050                          * vpfailed is used to (try to) avoid the case where
1051                          * a large number of pages are associated with a
1052                          * locked vnode, which could cause the pageout daemon
1053                          * to stall for an excessive amount of time.
1054                          */
1055                         if (object->type == OBJT_VNODE) {
1056                                 int flags;
1057
1058                                 vp = object->handle;
1059                                 flags = LK_EXCLUSIVE | LK_NOOBJ;
1060                                 if (vp == vpfailed)
1061                                         flags |= LK_NOWAIT;
1062                                 else
1063                                         flags |= LK_TIMELOCK;
1064                                 vm_page_hold(m);
1065                                 vm_page_wakeup(m);
1066
1067                                 /*
1068                                  * We have unbusied (m) temporarily so we can
1069                                  * acquire the vp lock without deadlocking.
1070                                  * (m) is held to prevent destruction.
1071                                  */
1072                                 if (vget(vp, flags) != 0) {
1073                                         vpfailed = vp;
1074                                         ++pageout_lock_miss;
1075                                         if (object->flags & OBJ_MIGHTBEDIRTY)
1076                                                     ++*vnodes_skippedp;
1077                                         vm_page_unhold(m);
1078                                         continue;
1079                                 }
1080
1081                                 /*
1082                                  * The page might have been moved to another
1083                                  * queue during potential blocking in vget()
1084                                  * above.  The page might have been freed and
1085                                  * reused for another vnode.  The object might
1086                                  * have been reused for another vnode.
1087                                  */
1088                                 if (m->queue - m->pc != PQ_INACTIVE ||
1089                                     m->object != object ||
1090                                     object->handle != vp) {
1091                                         if (object->flags & OBJ_MIGHTBEDIRTY)
1092                                                 ++*vnodes_skippedp;
1093                                         vput(vp);
1094                                         vm_page_unhold(m);
1095                                         continue;
1096                                 }
1097         
1098                                 /*
1099                                  * The page may have been busied during the
1100                                  * blocking in vput();  We don't move the
1101                                  * blocking in vget() above; we don't move the
1102                                  * statistics are more correct if we don't.
1103                                  */
1104                                 if (vm_page_busy_try(m, TRUE)) {
1105                                         vput(vp);
1106                                         vm_page_unhold(m);
1107                                         continue;
1108                                 }
1109                                 vm_page_unhold(m);
1110
1111                                 /*
1112                                  * (m) is busied again
1113                                  *
1114                                  * We own the busy bit and remove our hold
1115                                  * bit.  If the page is still held it
1116                                  * might be undergoing I/O, so skip it.
1117                                  */
1118                                 if (m->hold_count) {
1119                                         vm_page_and_queue_spin_lock(m);
1120                                         if (m->queue - m->pc == PQ_INACTIVE) {
1121                                                 TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE + q].pl, m, pageq);
1122                                                 TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE + q].pl, m, pageq);
1123                                         }
1124                                         vm_page_and_queue_spin_unlock(m);
1125                                         ++vm_swapcache_inactive_heuristic;
1126                                         if (object->flags & OBJ_MIGHTBEDIRTY)
1127                                                 ++*vnodes_skippedp;
1128                                         vm_page_wakeup(m);
1129                                         vput(vp);
1130                                         continue;
1131                                 }
1132                                 /* (m) is left busied as we fall through */
1133                         }
1134
1135                         /*
1136                          * page is busy and not held here.
1137                          *
1138                          * If a page is dirty, then it is either being washed
1139                          * (but not yet cleaned) or it is still in the
1140                          * laundry.  If it is still in the laundry, then we
1141                          * start the cleaning operation. 
1142                          *
1143                          * decrement inactive_shortage on success to account
1144                          * for the (future) cleaned page.  Otherwise we
1145                          * could wind up laundering or cleaning too many
1146                          * pages.
1147                          */
1148                         if (vm_pageout_clean(m) != 0) {
1149                                 ++delta;
1150                                 --maxlaunder;
1151                         }
1152                         /* clean ate busy, page no longer accessible */
1153                         if (vp != NULL)
1154                                 vput(vp);
1155                 } else {
1156                         vm_page_wakeup(m);
1157                 }
1158         }
1159         vm_page_queues_spin_lock(PQ_INACTIVE + q);
1160         TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE + q].pl, &marker, pageq);
1161         vm_page_queues_spin_unlock(PQ_INACTIVE + q);
1162
1163         return (delta);
1164 }
1165
1166 static int
1167 vm_pageout_scan_active(int pass, int q,
1168                        int inactive_shortage, int active_shortage,
1169                        int *recycle_countp)
1170 {
1171         struct vm_page marker;
1172         vm_page_t m;
1173         int actcount;
1174         int delta = 0;
1175         int pcount;
1176
1177         /*
1178          * We want to move pages from the active queue to the inactive
1179          * queue to get the inactive queue to the inactive target.  If
1180          * we still have a page shortage from above we try to directly free
1181          * clean pages instead of moving them.
1182          *
1183          * If we do still have a shortage we keep track of the number of
1184          * pages we free or cache (recycle_count) as a measure of thrashing
1185          * between the active and inactive queues.
1186          *
1187          * If we were able to completely satisfy the free+cache targets
1188          * from the inactive pool we limit the number of pages we move
1189          * from the active pool to the inactive pool to 2x the pages we
1190          * had removed from the inactive pool (with a minimum of 1/5 the
1191          * inactive target).  If we were not able to completely satisfy
1192          * the free+cache targets we go for the whole target aggressively.
1193          *
1194          * NOTE: Both variables can end up negative.
1195          * NOTE: We are still in a critical section.
1196          */
1197
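        /*
         * Initialize our marker.  This is the same dummy-page marker
         * technique used by the inactive scan above; it preserves our
         * queue position while the queue spinlock is dropped.
         */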
1198         bzero(&marker, sizeof(marker));
1199         marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
1200         marker.queue = PQ_ACTIVE + q;
1201         marker.pc = q;
1202         marker.wire_count = 1;
1203
1204         vm_page_queues_spin_lock(PQ_ACTIVE + q);
1205         TAILQ_INSERT_HEAD(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1206         vm_page_queues_spin_unlock(PQ_ACTIVE + q);
1207         pcount = vmstats.v_active_count;
1208
1209         while ((m = TAILQ_NEXT(&marker, pageq)) != NULL &&
1210                pcount-- > 0 && (inactive_shortage - delta > 0 ||
1211                                 active_shortage > 0))
1212         {
1213                 vm_page_and_queue_spin_lock(m);
1214                 if (m != TAILQ_NEXT(&marker, pageq)) {
1215                         vm_page_and_queue_spin_unlock(m);
1216                         ++pcount;
1217                         continue;
1218                 }
1219                 KKASSERT(m->queue - m->pc == PQ_ACTIVE);
1220                 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl,
1221                              &marker, pageq);
1222                 TAILQ_INSERT_AFTER(&vm_page_queues[PQ_ACTIVE + q].pl, m,
1223                                    &marker, pageq);
1224
1225                 /*
1226                  * Skip marker pages
1227                  */
1228                 if (m->flags & PG_MARKER) {
1229                         vm_page_and_queue_spin_unlock(m);
1230                         continue;
1231                 }
1232
1233                 /*
1234                  * Try to busy the page.  Don't mess with pages which are
1235                  * already busy or reorder them in the queue.
1236                  */
1237                 if (vm_page_busy_try(m, TRUE)) {
1238                         vm_page_and_queue_spin_unlock(m);
1239                         continue;
1240                 }
1241
1242                 /*
1243                  * Don't deactivate pages that are held, even if we can
1244                  * busy them.  (XXX why not?)
1245                  */
1246                 if (m->hold_count != 0) {
1247                         TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl,
1248                                      m, pageq);
1249                         TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE + q].pl,
1250                                           m, pageq);
1251                         vm_page_and_queue_spin_unlock(m);
1252                         vm_page_wakeup(m);
1253                         continue;
1254                 }
1255                 vm_page_and_queue_spin_unlock(m);
1256                 lwkt_yield();
1257
1258                 /*
1259                  * The page has been successfully busied and the page and
1260                  * queue are no longer locked.
1261                  */
1262
1263                 /*
1264                  * The count for pagedaemon pages is done after checking the
1265                  * page for eligibility...
1266                  */
1267                 mycpu->gd_cnt.v_pdpages++;
1268
1269                 /*
1270                  * Check to see "how much" the page has been used and clear
1271                  * the tracking access bits.  If the object has no references
1272                  * don't bother paying the expense.
1273                  */
1274                 actcount = 0;
1275                 if (m->object->ref_count != 0) {
1276                         if (m->flags & PG_REFERENCED)
1277                                 ++actcount;
1278                         actcount += pmap_ts_referenced(m);
1279                         if (actcount) {
1280                                 m->act_count += ACT_ADVANCE + actcount;
1281                                 if (m->act_count > ACT_MAX)
1282                                         m->act_count = ACT_MAX;
1283                         }
1284                 }
1285                 vm_page_flag_clear(m, PG_REFERENCED);
1286
1287                 /*
1288                  * actcount is only valid if the object ref_count is non-zero.
1289                  */
1290                 if (actcount && m->object->ref_count != 0) {
1291                         vm_page_and_queue_spin_lock(m);
1292                         if (m->queue - m->pc == PQ_ACTIVE) {
1293                                 TAILQ_REMOVE(
1294                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1295                                         m, pageq);
1296                                 TAILQ_INSERT_TAIL(
1297                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1298                                         m, pageq);
1299                         }
1300                         vm_page_and_queue_spin_unlock(m);
1301                         vm_page_wakeup(m);
1302                 } else {
1303                         m->act_count -= min(m->act_count, ACT_DECLINE);
1304                         if (vm_pageout_algorithm ||
1305                             m->object->ref_count == 0 ||
1306                             m->act_count < pass + 1
1307                         ) {
1308                                 /*
1309                                  * Deactivate the page.  If we had a
1310                                  * shortage from our inactive scan, try to
1311                                  * free (cache) the page instead.
1312                                  *
1313                                  * Don't just blindly cache the page if
1314                                  * we do not have a shortage from the
1315                                  * inactive scan; that could lead to
1316                                  * gigabytes being moved.
1317                                  */
1318                                 --active_shortage;
1319                                 if (inactive_shortage - delta > 0 ||
1320                                     m->object->ref_count == 0) {
1321                                         if (inactive_shortage - delta > 0)
1322                                                 ++*recycle_countp;
1323                                         vm_page_protect(m, VM_PROT_NONE);
1324                                         if (m->dirty == 0 &&
1325                                             inactive_shortage - delta > 0) {
1326                                                 ++delta;
1327                                                 vm_page_cache(m);
1328                                         } else {
1329                                                 vm_page_deactivate(m);
1330                                                 vm_page_wakeup(m);
1331                                         }
1332                                 } else {
1333                                         vm_page_deactivate(m);
1334                                         vm_page_wakeup(m);
1335                                 }
1336                         } else {
1337                                 vm_page_and_queue_spin_lock(m);
1338                                 if (m->queue - m->pc == PQ_ACTIVE) {
1339                                         TAILQ_REMOVE(
1340                                             &vm_page_queues[PQ_ACTIVE + q].pl,
1341                                             m, pageq);
1342                                         TAILQ_INSERT_TAIL(
1343                                             &vm_page_queues[PQ_ACTIVE + q].pl,
1344                                             m, pageq);
1345                                 }
1346                                 vm_page_and_queue_spin_unlock(m);
1347                                 vm_page_wakeup(m);
1348                         }
1349                 }
1350         }
1351
1352         /*
1353          * Clean out our local marker.
1354          */
1355         vm_page_queues_spin_lock(PQ_ACTIVE + q);
1356         TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1357         vm_page_queues_spin_unlock(PQ_ACTIVE + q);
1358
1359         return (delta);
1360 }
1361
1362 /*
1363  * The number of actually free pages can drop down to v_free_reserved;
1364  * when it does we try to build the free count back up above v_free_min.
1365  * Note that vm_paging_needed() also returns TRUE if v_free_count is not
1366  * at least v_free_min, so that is the minimum we must build the free
1367  * count to.
1368  *
1369  * We use a slightly higher target to improve hysteresis,
1370  * ((v_free_target + v_free_min) / 2).  Since v_free_target
1371  * is usually the same as v_cache_min, this maintains about
1372  * half the pages in the free queue as are in the cache queue,
1373  * providing pretty good pipelining for pageout operation.
1374  *
1375  * The system operator can manipulate vm.v_cache_min and
1376  * vm.v_free_target to tune the pageout daemon.  Be sure
1377  * to keep vm.v_free_min < vm.v_free_target.
1378  *
1379  * Note that the original paging target is to get at least
1380  * (free_min + cache_min) into (free + cache).  The slightly
1381  * higher target will shift additional pages from cache to free
1382  * without affecting the original paging target in order to
1383  * maintain better hysteresis and not have the free count always
1384  * be dead-on v_free_min.
1385  *
1386  * NOTE: we are still in a critical section.
1387  *
1388  * Pages moved from PQ_CACHE to totally free are not counted in the
1389  * pages_freed counter.
1390  */
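/*
 * Worked example with hypothetical numbers: on a machine with roughly
 * 262144 pages, vm_pageout_free_page_calc() below would yield
 * v_free_min = 1369 and v_free_reserved = 691, and vm_pageout_thread()
 * would typically set v_free_target = 4 * 1369 + 691 = 6167.  The loop
 * below would then keep freeing cache pages until v_free_count reaches
 * (1369 + 6167) / 2 = 3768.
 */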
1391 static void
1392 vm_pageout_scan_cache(int inactive_shortage,
1393                       int vnodes_skipped, int recycle_count)
1394 {
1395         struct vm_pageout_scan_info info;
1396         vm_page_t m;
1397
1398         while (vmstats.v_free_count <
1399                (vmstats.v_free_min + vmstats.v_free_target) / 2) {
1400                 /*
1401                  * This steals some code from vm/vm_page.c
1402                  */
1403                 static int cache_rover = 0;
1404
1405                 m = vm_page_list_find(PQ_CACHE, cache_rover & PQ_L2_MASK, FALSE);
1406                 if (m == NULL)
1407                         break;
1408                 /* page is returned removed from its queue and spinlocked */
1409                 if (vm_page_busy_try(m, TRUE)) {
1410                         vm_page_deactivate_locked(m);
1411                         vm_page_spin_unlock(m);
1412 #ifdef INVARIANTS
1413                         kprintf("Warning: busy page %p found in cache\n", m);
1414 #endif
1415                         continue;
1416                 }
1417                 vm_page_spin_unlock(m);
1418                 pagedaemon_wakeup();
1419                 lwkt_yield();
1420
1421                 /*
1422                  * The page has been successfully busied; it and its queue
1423                  * are no longer spinlocked.
1424                  */
1425                 if ((m->flags & PG_UNMANAGED) ||
1426                     m->hold_count ||
1427                     m->wire_count) {
1428                         vm_page_deactivate(m);
1429                         vm_page_wakeup(m);
1430                         continue;
1431                 }
1432                 KKASSERT((m->flags & PG_MAPPED) == 0);
1433                 KKASSERT(m->dirty == 0);
1434                 cache_rover += PQ_PRIME2;
1435                 vm_pageout_page_free(m);
1436                 mycpu->gd_cnt.v_dfree++;
1437         }
1438
1439 #if !defined(NO_SWAPPING)
1440         /*
1441          * Idle process swapout -- run once per second.
1442          */
1443         if (vm_swap_idle_enabled) {
1444                 static long lsec;
1445                 if (time_second != lsec) {
1446                         vm_pageout_req_swapout |= VM_SWAP_IDLE;
1447                         vm_req_vmdaemon();
1448                         lsec = time_second;
1449                 }
1450         }
1451 #endif
1452
1453         /*
1454          * If we didn't get enough free pages and we have skipped a vnode
1455          * in a writeable object, wake up the sync daemon.  Also kick off
1456          * swapout if we did not get enough free pages.
1457          */
1458         if (vm_paging_target() > 0) {
1459                 if (vnodes_skipped && vm_page_count_min(0))
1460                         speedup_syncer();
1461 #if !defined(NO_SWAPPING)
1462                 if (vm_swap_enabled && vm_page_count_target()) {
1463                         vm_req_vmdaemon();
1464                         vm_pageout_req_swapout |= VM_SWAP_NORMAL;
1465                 }
1466 #endif
1467         }
1468
1469         /*
1470          * Handle catastrophic conditions.  Under good conditions we should
1471          * be at the target, well beyond our minimum.  If we could not even
1472          * reach our minimum, the system is under heavy stress.
1473          *
1474          * Determine whether we have run out of memory.  This occurs when
1475          * swap_pager_full is TRUE and the only pages left in the page
1476          * queues are dirty.  We will still likely have page shortages.
1477          *
1478          * - swap_pager_full is set if insufficient swap was
1479          *   available to satisfy a requested pageout.
1480          *
1481          * - the inactive queue is bloated (4 x size of active queue),
1482          *   meaning it is unable to get rid of dirty pages.
1483          *
1484          * - vm_page_count_min() without counting pages recycled from the
1485          *   active queue (recycle_count) means we could not recover
1486          *   enough pages to meet bare minimum needs.  This test only
1487          *   works if the inactive queue is bloated.
1488          *
1489          * - due to a positive inactive_shortage we shifted the remaining
1490          *   dirty pages from the active queue to the inactive queue
1491          *   trying to find clean ones to free.
1492          */
1493         if (swap_pager_full && vm_page_count_min(recycle_count))
1494                 kprintf("Warning: system low on memory+swap!\n");
1495         if (swap_pager_full && vm_page_count_min(recycle_count) &&
1496             vmstats.v_inactive_count > vmstats.v_active_count * 4 &&
1497             inactive_shortage > 0) {
1498                 /*
1499                  * Kill something.
1500                  */
1501                 info.bigproc = NULL;
1502                 info.bigsize = 0;
1503                 allproc_scan(vm_pageout_scan_callback, &info);
1504                 if (info.bigproc != NULL) {
1505                         killproc(info.bigproc, "out of swap space");
1506                         info.bigproc->p_nice = PRIO_MIN;
1507                         info.bigproc->p_usched->resetpriority(
1508                                 FIRST_LWP_IN_PROC(info.bigproc));
1509                         wakeup(&vmstats.v_free_count);
1510                         PRELE(info.bigproc);
1511                 }
1512         }
1513 }
1514
1515 /*
1516  * The caller must hold proc_token.
1517  */
1518 static int
1519 vm_pageout_scan_callback(struct proc *p, void *data)
1520 {
1521         struct vm_pageout_scan_info *info = data;
1522         vm_offset_t size;
1523
1524         /*
1525          * Never kill system processes or init.  If we have configured swap
1526          * then try to avoid killing low-numbered pids.
1527          */
1528         if ((p->p_flag & P_SYSTEM) || (p->p_pid == 1) ||
1529             ((p->p_pid < 48) && (vm_swap_size != 0))) {
1530                 return (0);
1531         }
1532
1533         /*
1534          * if the process is in a non-running type state,
1535          * don't touch it.
1536          */
1537         if (p->p_stat != SACTIVE && p->p_stat != SSTOP)
1538                 return (0);
1539
1540         /*
1541          * Get the approximate process size.  Note that anonymous pages
1542          * with backing swap will be counted twice, but there should not
1543          * be too many such pages due to the stress the VM system is
1544          * under at this point.
1545          */
1546         size = vmspace_anonymous_count(p->p_vmspace) +
1547                 vmspace_swap_count(p->p_vmspace);
1548
1549         /*
1550          * If this process is bigger than the biggest one,
1551          * remember it.
1552          */
1553         if (info->bigsize < size) {
1554                 if (info->bigproc)
1555                         PRELE(info->bigproc);
1556                 PHOLD(p);
1557                 info->bigproc = p;
1558                 info->bigsize = size;
1559         }
1560         lwkt_yield();
1561         return(0);
1562 }
1563
1564 /*
1565  * This routine tries to maintain the pseudo-LRU active queue so that
1566  * some statistics accumulation still occurs during long periods of time
1567  * when there is no paging.  This code helps the situation where paging
1568  * just starts to occur.
1569  */
1570 static void
1571 vm_pageout_page_stats(int q)
1572 {
1573         static int fullintervalcount = 0;
1574         struct vm_page marker;
1575         vm_page_t m;
1576         int pcount, tpcount;            /* Number of pages to check */
1577         int page_shortage;
1578
1579         page_shortage = (vmstats.v_inactive_target + vmstats.v_cache_max +
1580                          vmstats.v_free_min) -
1581                         (vmstats.v_free_count + vmstats.v_inactive_count +
1582                          vmstats.v_cache_count);
1583
1584         if (page_shortage <= 0)
1585                 return;
1586
1587         pcount = vmstats.v_active_count;
1588         fullintervalcount += vm_pageout_stats_interval;
1589         if (fullintervalcount < vm_pageout_full_stats_interval) {
1590                 tpcount = (vm_pageout_stats_max * vmstats.v_active_count) /
1591                           vmstats.v_page_count;
1592                 if (pcount > tpcount)
1593                         pcount = tpcount;
1594         } else {
1595                 fullintervalcount = 0;
1596         }
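        /*
         * E.g. with hypothetical numbers: if vm_pageout_stats_max = 6167,
         * v_active_count = 100000 and v_page_count = 262144, a partial
         * scan is limited to tpcount = 6167 * 100000 / 262144 = 2352
         * pages; the full active queue is only scanned once every
         * vm_pageout_full_stats_interval seconds.
         */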
1597
1598         bzero(&marker, sizeof(marker));
1599         marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
1600         marker.queue = PQ_ACTIVE + q;
1601         marker.pc = q;
1602         marker.wire_count = 1;
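        /*
         * The marker is a dummy page used to hold our place in the queue:
         * it is re-inserted after each page we examine so the queue spin
         * lock can be dropped while the page is processed, and the scan
         * loops skip such entries via the PG_MARKER flag check.
         */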
1603
1604         vm_page_queues_spin_lock(PQ_ACTIVE + q);
1605         TAILQ_INSERT_HEAD(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1606         vm_page_queues_spin_unlock(PQ_ACTIVE + q);
1607
1608         while ((m = TAILQ_NEXT(&marker, pageq)) != NULL &&
1609                pcount-- > 0)
1610         {
1611                 int actcount;
1612
1613                 vm_page_and_queue_spin_lock(m);
1614                 if (m != TAILQ_NEXT(&marker, pageq)) {
1615                         vm_page_and_queue_spin_unlock(m);
1616                         ++pcount;
1617                         continue;
1618                 }
1619                 KKASSERT(m->queue - m->pc == PQ_ACTIVE);
1620                 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1621                 TAILQ_INSERT_AFTER(&vm_page_queues[PQ_ACTIVE + q].pl, m,
1622                                    &marker, pageq);
1623
1624                 /*
1625                  * Ignore markers
1626                  */
1627                 if (m->flags & PG_MARKER) {
1628                         vm_page_and_queue_spin_unlock(m);
1629                         continue;
1630                 }
1631
1632                 /*
1633                  * Ignore pages we can't busy
1634                  */
1635                 if (vm_page_busy_try(m, TRUE)) {
1636                         vm_page_and_queue_spin_unlock(m);
1637                         continue;
1638                 }
1639                 vm_page_and_queue_spin_unlock(m);
1640                 KKASSERT(m->queue - m->pc == PQ_ACTIVE);
1641
1642                 /*
1643                  * We now have a safely busied page; the page and queue
1644                  * spinlocks have been released.
1645                  *
1646                  * Ignore held pages
1647                  */
1648                 if (m->hold_count) {
1649                         vm_page_wakeup(m);
1650                         continue;
1651                 }
1652
1653                 /*
1654                  * Calculate activity
1655                  */
1656                 actcount = 0;
1657                 if (m->flags & PG_REFERENCED) {
1658                         vm_page_flag_clear(m, PG_REFERENCED);
1659                         actcount += 1;
1660                 }
1661                 actcount += pmap_ts_referenced(m);
1662
1663                 /*
1664                  * Update act_count and move page to end of queue.
1665                  */
1666                 if (actcount) {
1667                         m->act_count += ACT_ADVANCE + actcount;
1668                         if (m->act_count > ACT_MAX)
1669                                 m->act_count = ACT_MAX;
1670                         vm_page_and_queue_spin_lock(m);
1671                         if (m->queue - m->pc == PQ_ACTIVE) {
1672                                 TAILQ_REMOVE(
1673                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1674                                         m, pageq);
1675                                 TAILQ_INSERT_TAIL(
1676                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1677                                         m, pageq);
1678                         }
1679                         vm_page_and_queue_spin_unlock(m);
1680                         vm_page_wakeup(m);
1681                         continue;
1682                 }
1683
1684                 if (m->act_count == 0) {
1685                         /*
1686                          * We turn off page access, so that we have
1687                          * more accurate RSS stats.  We don't do this
1688                          * in the normal page deactivation when the
1689                          * system is loaded VM wise, because the
1690                          * cost of the large number of page protect
1691                          * operations would be higher than the value
1692                          * of doing the operation.
1693                          *
1694                          * We use the marker to save our place so
1695                          * we can release the spin lock.  Both (m)
1696                          * and (next) will be invalid.
1697                          */
1698                         vm_page_protect(m, VM_PROT_NONE);
1699                         vm_page_deactivate(m);
1700                 } else {
1701                         m->act_count -= min(m->act_count, ACT_DECLINE);
1702                         vm_page_and_queue_spin_lock(m);
1703                         if (m->queue - m->pc == PQ_ACTIVE) {
1704                                 TAILQ_REMOVE(
1705                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1706                                         m, pageq);
1707                                 TAILQ_INSERT_TAIL(
1708                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1709                                         m, pageq);
1710                         }
1711                         vm_page_and_queue_spin_unlock(m);
1712                 }
1713                 vm_page_wakeup(m);
1714         }
1715
1716         /*
1717          * Remove our local marker
1718          */
1719         vm_page_queues_spin_lock(PQ_ACTIVE + q);
1720         TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1721         vm_page_queues_spin_unlock(PQ_ACTIVE + q);
1722 }
1723
1724 static int
1725 vm_pageout_free_page_calc(vm_size_t count)
1726 {
1727         if (count < vmstats.v_page_count)
1728                  return 0;
1729         /*
1730          * free_reserved needs to include enough for the largest swap pager
1731          * structures plus enough for any pv_entry structs when paging.
1732          *
1733          * v_free_min           normal allocations
1734          * v_free_reserved      system allocations
1735          * v_pageout_free_min   allocations by pageout daemon
1736          * v_interrupt_free_min low-level allocations (e.g. swap structures)
1737          */
1738         if (vmstats.v_page_count > 1024)
1739                 vmstats.v_free_min = 64 + (vmstats.v_page_count - 1024) / 200;
1740         else
1741                 vmstats.v_free_min = 64;
1742         vmstats.v_free_reserved = vmstats.v_free_min * 4 / 8 + 7;
1743         vmstats.v_free_severe = vmstats.v_free_min * 4 / 8 + 0;
1744         vmstats.v_pageout_free_min = vmstats.v_free_min * 2 / 8 + 7;
1745         vmstats.v_interrupt_free_min = vmstats.v_free_min * 1 / 8 + 7;
1746
1747         return 1;
1748 }
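/*
 * Worked example with hypothetical numbers: for v_page_count = 262144
 * (1GB of 4K pages) the calculation above yields
 *
 *      v_free_min           = 64 + (262144 - 1024) / 200 = 1369
 *      v_free_reserved      = 1369 * 4 / 8 + 7 = 691
 *      v_free_severe        = 1369 * 4 / 8 = 684
 *      v_pageout_free_min   = 1369 * 2 / 8 + 7 = 349
 *      v_interrupt_free_min = 1369 * 1 / 8 + 7 = 178
 */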
1749
1750
1751 /*
1752  * vm_pageout is the high level pageout daemon.
1753  *
1754  * No requirements.
1755  */
1756 static void
1757 vm_pageout_thread(void)
1758 {
1759         int pass;
1760         int q;
1761
1762         /*
1763          * Initialize some paging parameters.
1764          */
1765         curthread->td_flags |= TDF_SYSTHREAD;
1766
1767         if (vmstats.v_page_count < 2000)
1768                 vm_pageout_page_count = 8;
1769
1770         vm_pageout_free_page_calc(vmstats.v_page_count);
1771
1772         /*
1773          * v_free_target and v_cache_min control pageout hysteresis.  Note
1774          * that these are more a measure of the VM cache queue hysteresis
1775          * than the VM free queue.  Specifically, v_free_target is the
1776          * high water mark (free+cache pages).
1777          *
1778          * v_free_reserved + v_cache_min (mostly means v_cache_min) is the
1779          * low water mark, while v_free_min is the stop.  v_cache_min must
1780          * be big enough to handle memory needs while the pageout daemon
1781          * is signalled and run to free more pages.
1782          */
1783         if (vmstats.v_free_count > 6144)
1784                 vmstats.v_free_target = 4 * vmstats.v_free_min + vmstats.v_free_reserved;
1785         else
1786                 vmstats.v_free_target = 2 * vmstats.v_free_min + vmstats.v_free_reserved;
1787
1788         /*
1789          * NOTE: With the new buffer cache b_act_count we want the default
1790          *       inactive target to be a percentage of available memory.
1791          *
1792          *       The inactive target essentially determines the minimum
1793          *       number of 'temporary' pages capable of caching one-time-use
1794          *       files when the VM system is otherwise full of pages
1795          *       belonging to multi-time-use files or active program data.
1796          *
1797          * NOTE: The inactive target is aggressively pursued only if the
1798          *       inactive queue becomes too small.  If the inactive queue
1799          *       is large enough to satisfy page movement to free+cache
1800          *       then it is repopulated more slowly from the active queue.
1801          *       This allows a general inactive_target default to be set.
1802          *
1803          *       There is an issue here for processes which sit mostly idle
1804          *       'overnight', such as sshd, tcsh, and X.  Any movement from
1805          *       the active queue will eventually cause such pages to
1806          *       recycle, causing a lot of paging in the morning.
1807          *       To reduce the incidence of this, pages cycled out of the
1808          *       buffer cache are moved directly to the inactive queue if
1809          *       they were only used once or twice.
1810          *
1811          *       The vfs.vm_cycle_point sysctl can be used to adjust this.
1812          *       Increasing the value (up to 64) increases the number of
1813          *       buffer recyclements which go directly to the inactive queue.
1814          */
1815         if (vmstats.v_free_count > 2048) {
1816                 vmstats.v_cache_min = vmstats.v_free_target;
1817                 vmstats.v_cache_max = 2 * vmstats.v_cache_min;
1818         } else {
1819                 vmstats.v_cache_min = 0;
1820                 vmstats.v_cache_max = 0;
1821         }
1822         vmstats.v_inactive_target = vmstats.v_free_count / 4;
1823
1824         /* XXX does not really belong here */
1825         if (vm_page_max_wired == 0)
1826                 vm_page_max_wired = vmstats.v_free_count / 3;
1827
1828         if (vm_pageout_stats_max == 0)
1829                 vm_pageout_stats_max = vmstats.v_free_target;
1830
1831         /*
1832          * Set interval in seconds for stats scan.
1833          */
1834         if (vm_pageout_stats_interval == 0)
1835                 vm_pageout_stats_interval = 5;
1836         if (vm_pageout_full_stats_interval == 0)
1837                 vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;
1838
1839
1840         /*
1841          * Set maximum free per pass
1842          */
1843         if (vm_pageout_stats_free_max == 0)
1844                 vm_pageout_stats_free_max = 5;
1845
1846         swap_pager_swap_init();
1847         pass = 0;
1848
1849         /*
1850          * The pageout daemon is never done, so loop forever.
1851          */
1852         while (TRUE) {
1853                 int error;
1854                 int delta1;
1855                 int delta2;
1856                 int inactive_shortage;
1857                 int active_shortage;
1858                 int vnodes_skipped = 0;
1859                 int recycle_count = 0;
1860                 int tmp;
1861
1862                 /*
1863                  * Wait for an action request.  If we time out, check to
1864                  * see if paging is needed (in case the normal wakeup
1865                  * code raced us).
1866                  */
1867                 if (vm_pages_needed == 0) {
1868                         error = tsleep(&vm_pages_needed,
1869                                        0, "psleep",
1870                                        vm_pageout_stats_interval * hz);
1871                         if (error &&
1872                             vm_paging_needed() == 0 &&
1873                             vm_pages_needed == 0) {
1874                                 for (q = 0; q < PQ_MAXL2_SIZE; ++q)
1875                                         vm_pageout_page_stats(q);
1876                                 continue;
1877                         }
1878                         vm_pages_needed = 1;
1879                 }
1880
1881                 mycpu->gd_cnt.v_pdwakeups++;
1882
1883                 /*
1884                  * Do whatever cleanup the pmap code can.
1885                  */
1886                 pmap_collect();
1887
1888                 /*
1889                  * Scan for pageout.  Try to avoid thrashing the system
1890                  * with activity.
1891                  *
1892                  * Calculate our target for the number of free+cache pages we
1893                  * want to get to.  This is higher than the number that causes
1894                  * allocations to stall (severe) in order to provide hysteresis,
1895                  * and if we don't make it all the way but get to the minimum,
1896                  * we're happy.
1897                  */
1898                 inactive_shortage = vm_paging_target() + vm_pageout_deficit;
1899                 vm_pageout_deficit = 0;
1900                 delta1 = 0;
1901                 for (q = 0; q < PQ_MAXL2_SIZE; ++q) {
1902                         delta1 += vm_pageout_scan_inactive(
1903                                         pass, q,
1904                                         inactive_shortage / PQ_MAXL2_SIZE + 1,
1905                                         &vnodes_skipped);
1906                 }
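                /*
                 * The per-queue target rounds up: with, say, an
                 * inactive_shortage of 1000 and PQ_MAXL2_SIZE = 256, each
                 * queue is asked for 1000 / 256 + 1 = 4 pages, so up to
                 * 1024 pages may be reclaimed -- a slight overshoot of the
                 * target rather than an undershoot.
                 */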
1907
1908                 /*
1909                  * Figure out how many active pages we must deactivate.  If
1910                  * we were able to reach our target with just the inactive
1911                  * scan above, we limit the number of active pages we
1912                  * deactivate to reduce unnecessary work.
1913                  */
1914                 active_shortage = vmstats.v_inactive_target -
1915                                   vmstats.v_inactive_count;
1916
1917                 tmp = inactive_shortage;
1918                 if (tmp < vmstats.v_inactive_target / 10)
1919                         tmp = vmstats.v_inactive_target / 10;
1920                 inactive_shortage -= delta1;
1921                 if (inactive_shortage <= 0 && active_shortage > tmp * 2)
1922                         active_shortage = tmp * 2;
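                /*
                 * E.g. with hypothetical numbers: if the inactive scan
                 * covered its shortage (inactive_shortage <= 0 here) and
                 * v_inactive_target is 60000, tmp is at least 6000 and
                 * active_shortage is clamped to no more than 12000,
                 * bounding the deactivation work done in this pass.
                 */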
1923
1924                 delta2 = 0;
1925                 for (q = 0; q < PQ_MAXL2_SIZE; ++q) {
1926                         delta2 += vm_pageout_scan_active(
1927                                         pass, q,
1928                                         inactive_shortage / PQ_MAXL2_SIZE + 1,
1929                                         active_shortage / PQ_MAXL2_SIZE + 1,
1930                                         &recycle_count);
1931                 }
1932
1933                 /*
1934                  * Finally free enough cache pages to meet our free page
1935                  * requirement and take more drastic measures if we are
1936                  * still in trouble.
1937                  */
1938                 inactive_shortage -= delta2;
1939                 vm_pageout_scan_cache(inactive_shortage, vnodes_skipped,
1940                                       recycle_count);
1941
1942                 /*
1943                  * Wait for more work.
1944                  */
1945                 if (inactive_shortage > 0) {
1946                         ++pass;
1947                         if (swap_pager_full) {
1948                                 /*
1949                                  * Running out of memory, catastrophic back-off
1950                                  * to one-second intervals.
1951                                  */
1952                                 tsleep(&vm_pages_needed, 0, "pdelay", hz);
1953                         } else if (pass < 10 && vm_pages_needed > 1) {
1954                                 /*
1955                                  * Normal operation, additional processes
1956                                  * have already kicked us.  Retry immediately.
1957                                  */
1958                         } else if (pass < 10) {
1959                                 /*
1960                                  * Normal operation, fewer processes.  Delay
1961                                  * a bit but allow wakeups.
1962                                  */
1963                                 vm_pages_needed = 0;
1964                                 tsleep(&vm_pages_needed, 0, "pdelay", hz / 10);
1965                                 vm_pages_needed = 1;
1966                         } else {
1967                                 /*
1968                                  * We've taken too many passes, forced delay.
1969                                  */
1970                                 tsleep(&vm_pages_needed, 0, "pdelay", hz / 10);
1971                         }
1972                 } else {
1973                         /*
1974                          * Interlocked wakeup of waiters (non-optional)
1975                          */
1976                         pass = 0;
1977                         if (vm_pages_needed && !vm_page_count_min(0)) {
1978                                 wakeup(&vmstats.v_free_count);
1979                                 vm_pages_needed = 0;
1980                         }
1981                 }
1982         }
1983 }
1984
1985 static struct kproc_desc page_kp = {
1986         "pagedaemon",
1987         vm_pageout_thread,
1988         &pagethread
1989 };
1990 SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)
1991
1992
1993 /*
1994  * Called after allocating a page out of the cache or free queue
1995  * to possibly wake the pagedaemon up to replenish our supply.
1996  *
1997  * We try to generate some hysteresis by waking the pagedaemon up
1998  * when our free+cache pages go below the free_min+cache_min level.
1999  * The pagedaemon tries to get the count back up to at least the
2000  * minimum, and through to the target level if possible.
2001  *
2002  * If the pagedaemon is already active bump vm_pages_needed as a hint
2003  * that there are even more requests pending.
2004  *
2005  * SMP races ok?
2006  * No requirements.
2007  */
2008 void
2009 pagedaemon_wakeup(void)
2010 {
2011         if (vm_paging_needed() && curthread != pagethread) {
2012                 if (vm_pages_needed == 0) {
2013                         vm_pages_needed = 1;    /* SMP race ok */
2014                         wakeup(&vm_pages_needed);
2015                 } else if (vm_page_count_min(0)) {
2016                         ++vm_pages_needed;      /* SMP race ok */
2017                 }
2018         }
2019 }
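/*
 * Illustrative (hypothetical) caller: an allocation path that has just
 * pulled a page off the free or cache queue might do
 *
 *      m = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL);
 *      if (m != NULL)
 *              pagedaemon_wakeup();
 *
 * so the daemon can begin replenishing before the queues run dry.
 */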
2020
2021 #if !defined(NO_SWAPPING)
2022
2023 /*
2024  * SMP races ok?
2025  * No requirements.
2026  */
2027 static void
2028 vm_req_vmdaemon(void)
2029 {
2030         static int lastrun = 0;
2031
2032         if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
2033                 wakeup(&vm_daemon_needed);
2034                 lastrun = ticks;
2035         }
2036 }
2037
2038 static int vm_daemon_callback(struct proc *p, void *data __unused);
2039
2040 /*
2041  * No requirements.
2042  */
2043 static void
2044 vm_daemon(void)
2045 {
2046         /*
2047          * XXX vm_daemon_needed specific token?
2048          */
2049         while (TRUE) {
2050                 tsleep(&vm_daemon_needed, 0, "psleep", 0);
2051                 if (vm_pageout_req_swapout) {
2052                         swapout_procs(vm_pageout_req_swapout);
2053                         vm_pageout_req_swapout = 0;
2054                 }
2055                 /*
2056                  * Scan the processes for those exceeding their rlimits or
2057                  * that are swapped out -- deactivate their pages.
2058                  */
2059                 allproc_scan(vm_daemon_callback, NULL);
2060         }
2061 }
2062
2063 /*
2064  * Caller must hold proc_token.
2065  */
2066 static int
2067 vm_daemon_callback(struct proc *p, void *data __unused)
2068 {
2069         vm_pindex_t limit, size;
2070
2071         /*
2072          * If this is a system process or the process is exiting,
2073          * skip it.
2074          */
2075         if (p->p_flag & (P_SYSTEM | P_WEXIT))
2076                 return (0);
2077
2078         /*
2079          * if the process is in a non-running type state,
2080          * don't touch it.
2081          */
2082         if (p->p_stat != SACTIVE && p->p_stat != SSTOP)
2083                 return (0);
2084
2085         /*
2086          * get a limit
2087          */
2088         limit = OFF_TO_IDX(qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
2089                                 p->p_rlimit[RLIMIT_RSS].rlim_max));
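        /*
         * E.g. a hypothetical RLIMIT_RSS of 64MB with 4K pages gives
         * limit = OFF_TO_IDX(67108864) = 16384 pages.
         */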
2090
2091         /*
2092          * let processes that are swapped out really be
2093          * swapped out.  Set the limit to nothing to get as
2094          * many pages out to swap as possible.
2095          */
2096         if (p->p_flag & P_SWAPPEDOUT)
2097                 limit = 0;
2098
2099         lwkt_gettoken(&p->p_vmspace->vm_map.token);
2100         size = vmspace_resident_count(p->p_vmspace);
2101         if (limit >= 0 && size >= limit) {
2102                 vm_pageout_map_deactivate_pages(&p->p_vmspace->vm_map, limit);
2103         }
2104         lwkt_reltoken(&p->p_vmspace->vm_map.token);
2105         return (0);
2106 }
2107
2108 #endif