88d270dddd352c95a737053e2086ce9d421e5976
[dragonfly.git] / sys / vm / vm_pageout.c
1 /*
2  * Copyright (c) 1991 Regents of the University of California.
3  * All rights reserved.
4  * Copyright (c) 1994 John S. Dyson
5  * All rights reserved.
6  * Copyright (c) 1994 David Greenman
7  * All rights reserved.
8  *
9  * This code is derived from software contributed to Berkeley by
10  * The Mach Operating System project at Carnegie-Mellon University.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *      from: @(#)vm_pageout.c  7.4 (Berkeley) 5/7/91
37  *
38  *
39  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40  * All rights reserved.
41  *
42  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
43  *
44  * Permission to use, copy, modify and distribute this software and
45  * its documentation is hereby granted, provided that both the copyright
46  * notice and this permission notice appear in all copies of the
47  * software, derivative works or modified versions, and any portions
48  * thereof, and that both notices appear in supporting documentation.
49  *
50  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53  *
54  * Carnegie Mellon requests users of this software to return to
55  *
56  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
57  *  School of Computer Science
58  *  Carnegie Mellon University
59  *  Pittsburgh PA 15213-3890
60  *
61  * any improvements or extensions that they make and grant Carnegie the
62  * rights to redistribute these changes.
63  *
64  * $FreeBSD: src/sys/vm/vm_pageout.c,v 1.151.2.15 2002/12/29 18:21:04 dillon Exp $
65  */
66
67 /*
68  *      The proverbial page-out daemon.
69  */
70
71 #include "opt_vm.h"
72 #include <sys/param.h>
73 #include <sys/systm.h>
74 #include <sys/kernel.h>
75 #include <sys/proc.h>
76 #include <sys/kthread.h>
77 #include <sys/resourcevar.h>
78 #include <sys/signalvar.h>
79 #include <sys/vnode.h>
80 #include <sys/vmmeter.h>
81 #include <sys/conf.h>
82 #include <sys/sysctl.h>
83
84 #include <vm/vm.h>
85 #include <vm/vm_param.h>
86 #include <sys/lock.h>
87 #include <vm/vm_object.h>
88 #include <vm/vm_page.h>
89 #include <vm/vm_map.h>
90 #include <vm/vm_pageout.h>
91 #include <vm/vm_pager.h>
92 #include <vm/swap_pager.h>
93 #include <vm/vm_extern.h>
94
95 #include <sys/spinlock2.h>
96 #include <vm/vm_page2.h>
97
98 /*
99  * System initialization
100  */
101
102 /* the kernel process "vm_pageout"*/
103 static int vm_pageout_page(vm_page_t m, long *max_launderp,
104                            long *vnodes_skippedp, struct vnode **vpfailedp,
105                            int pass, int vmflush_flags);
106 static int vm_pageout_clean_helper (vm_page_t, int);
107 static int vm_pageout_free_page_calc (vm_size_t count);
108 static void vm_pageout_page_free(vm_page_t m) ;
109 struct thread *emergpager;
110 struct thread *pagethread;
111 static int sequence_emerg_pager;
112
113 #if !defined(NO_SWAPPING)
114 /* the kernel process "vm_daemon"*/
115 static void vm_daemon (void);
116 static struct   thread *vmthread;
117
118 static struct kproc_desc vm_kp = {
119         "vmdaemon",
120         vm_daemon,
121         &vmthread
122 };
123 SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp);
124 #endif
125
126 int vm_pages_needed = 0;        /* Event on which pageout daemon sleeps */
127 int vm_pageout_deficit = 0;     /* Estimated number of pages deficit */
128 int vm_pageout_pages_needed = 0;/* pageout daemon needs pages */
129 int vm_page_free_hysteresis = 16;
130 static int vm_pagedaemon_time;
131
132 #if !defined(NO_SWAPPING)
133 static int vm_pageout_req_swapout;
134 static int vm_daemon_needed;
135 #endif
136 static int vm_max_launder = 4096;
137 static int vm_emerg_launder = 100;
138 static int vm_pageout_stats_max=0, vm_pageout_stats_interval = 0;
139 static int vm_pageout_full_stats_interval = 0;
140 static int vm_pageout_stats_free_max=0, vm_pageout_algorithm=0;
141 static int defer_swap_pageouts=0;
142 static int disable_swap_pageouts=0;
143 static u_int vm_anonmem_decline = ACT_DECLINE;
144 static u_int vm_filemem_decline = ACT_DECLINE * 2;
145
146 #if defined(NO_SWAPPING)
147 static int vm_swap_enabled=0;
148 static int vm_swap_idle_enabled=0;
149 #else
150 static int vm_swap_enabled=1;
151 static int vm_swap_idle_enabled=0;
152 #endif
153 int vm_pageout_memuse_mode=1;   /* 0-disable, 1-passive, 2-active swp*/
154
155 SYSCTL_UINT(_vm, VM_PAGEOUT_ALGORITHM, anonmem_decline,
156         CTLFLAG_RW, &vm_anonmem_decline, 0, "active->inactive anon memory");
157
158 SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, filemem_decline,
159         CTLFLAG_RW, &vm_filemem_decline, 0, "active->inactive file cache");
160
161 SYSCTL_INT(_vm, OID_AUTO, page_free_hysteresis,
162         CTLFLAG_RW, &vm_page_free_hysteresis, 0,
163         "Free more pages than the minimum required");
164
165 SYSCTL_INT(_vm, OID_AUTO, max_launder,
166         CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");
167 SYSCTL_INT(_vm, OID_AUTO, emerg_launder,
168         CTLFLAG_RW, &vm_emerg_launder, 0, "Emergency pager minimum");
169
170 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
171         CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");
172
173 SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
174         CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");
175
176 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
177         CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");
178
179 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
180         CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "Not implemented");
181 SYSCTL_INT(_vm, OID_AUTO, pageout_memuse_mode,
182         CTLFLAG_RW, &vm_pageout_memuse_mode, 0, "memoryuse resource mode");
183
184 #if defined(NO_SWAPPING)
185 SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
186         CTLFLAG_RD, &vm_swap_enabled, 0, "");
187 SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
188         CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
189 #else
190 SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
191         CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
192 SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
193         CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
194 #endif
195
196 SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
197         CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");
198
199 SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
200         CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");
201
202 static int pageout_lock_miss;
203 SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
204         CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");
205
206 int vm_page_max_wired;          /* XXX max # of wired pages system-wide */
207
208 #if !defined(NO_SWAPPING)
209 static void vm_req_vmdaemon (void);
210 #endif
211 static void vm_pageout_page_stats(int q);
212
213 /*
214  * Calculate approximately how many pages on each queue to try to
215  * clean.  An exact calculation creates an edge condition when the
216  * queues are unbalanced so add significant slop.  The queue scans
217  * will stop early when targets are reached and will start where they
218  * left off on the next pass.
219  *
220  * We need to be generous here because there are all sorts of loading
221  * conditions that can cause edge cases if try to average over all queues.
222  * In particular, storage subsystems have become so fast that paging
223  * activity can become quite frantic.  Eventually we will probably need
224  * two paging threads, one for dirty pages and one for clean, to deal
225  * with the bandwidth requirements.
226
227  * So what we do is calculate a value that can be satisfied nominally by
228  * only having to scan half the queues.
229  */
230 static __inline long
231 PQAVERAGE(long n)
232 {
233         long avg;
234
235         if (n >= 0) {
236                 avg = ((n + (PQ_L2_SIZE - 1)) / (PQ_L2_SIZE / 2) + 1);
237         } else {
238                 avg = ((n - (PQ_L2_SIZE - 1)) / (PQ_L2_SIZE / 2) - 1);
239         }
240         return avg;
241 }
242
243 /*
244  * vm_pageout_clean_helper:
245  *
246  * Clean the page and remove it from the laundry.  The page must be busied
247  * by the caller and will be disposed of (put away, flushed) by this routine.
248  */
249 static int
250 vm_pageout_clean_helper(vm_page_t m, int vmflush_flags)
251 {
252         vm_object_t object;
253         vm_page_t mc[BLIST_MAX_ALLOC];
254         int error;
255         int ib, is, page_base;
256         vm_pindex_t pindex = m->pindex;
257
258         object = m->object;
259
260         /*
261          * Don't mess with the page if it's held or special.  Theoretically
262          * we can pageout held pages but there is no real need to press our
263          * luck, so don't.
264          */
265         if (m->hold_count != 0 || (m->flags & PG_UNMANAGED)) {
266                 vm_page_wakeup(m);
267                 return 0;
268         }
269
270         /*
271          * Place page in cluster.  Align cluster for optimal swap space
272          * allocation (whether it is swap or not).  This is typically ~16-32
273          * pages, which also tends to align the cluster to multiples of the
274          * filesystem block size if backed by a filesystem.
275          */
276         page_base = pindex % BLIST_MAX_ALLOC;
277         mc[page_base] = m;
278         ib = page_base - 1;
279         is = page_base + 1;
280
281         /*
282          * Scan object for clusterable pages.
283          *
284          * We can cluster ONLY if: ->> the page is NOT
285          * clean, wired, busy, held, or mapped into a
286          * buffer, and one of the following:
287          * 1) The page is inactive, or a seldom used
288          *    active page.
289          * -or-
290          * 2) we force the issue.
291          *
292          * During heavy mmap/modification loads the pageout
293          * daemon can really fragment the underlying file
294          * due to flushing pages out of order and not trying
295          * align the clusters (which leave sporatic out-of-order
296          * holes).  To solve this problem we do the reverse scan
297          * first and attempt to align our cluster, then do a 
298          * forward scan if room remains.
299          */
300         vm_object_hold(object);
301
302         while (ib >= 0) {
303                 vm_page_t p;
304
305                 p = vm_page_lookup_busy_try(object, pindex - page_base + ib,
306                                             TRUE, &error);
307                 if (error || p == NULL)
308                         break;
309                 if ((p->queue - p->pc) == PQ_CACHE ||
310                     (p->flags & PG_UNMANAGED)) {
311                         vm_page_wakeup(p);
312                         break;
313                 }
314                 vm_page_test_dirty(p);
315                 if (((p->dirty & p->valid) == 0 &&
316                      (p->flags & PG_NEED_COMMIT) == 0) ||
317                     p->wire_count != 0 ||       /* may be held by buf cache */
318                     p->hold_count != 0) {       /* may be undergoing I/O */
319                         vm_page_wakeup(p);
320                         break;
321                 }
322                 if (p->queue - p->pc != PQ_INACTIVE) {
323                         if (p->queue - p->pc != PQ_ACTIVE ||
324                             (vmflush_flags & VM_PAGER_ALLOW_ACTIVE) == 0) {
325                                 vm_page_wakeup(p);
326                                 break;
327                         }
328                 }
329
330                 /*
331                  * Try to maintain page groupings in the cluster.
332                  */
333                 if (m->flags & PG_WINATCFLS)
334                         vm_page_flag_set(p, PG_WINATCFLS);
335                 else
336                         vm_page_flag_clear(p, PG_WINATCFLS);
337                 p->act_count = m->act_count;
338
339                 mc[ib] = p;
340                 --ib;
341         }
342         ++ib;   /* fixup */
343
344         while (is < BLIST_MAX_ALLOC &&
345                pindex - page_base + is < object->size) {
346                 vm_page_t p;
347
348                 p = vm_page_lookup_busy_try(object, pindex - page_base + is,
349                                             TRUE, &error);
350                 if (error || p == NULL)
351                         break;
352                 if (((p->queue - p->pc) == PQ_CACHE) ||
353                     (p->flags & PG_UNMANAGED)) {
354                         vm_page_wakeup(p);
355                         break;
356                 }
357                 vm_page_test_dirty(p);
358                 if (((p->dirty & p->valid) == 0 &&
359                      (p->flags & PG_NEED_COMMIT) == 0) ||
360                     p->wire_count != 0 ||       /* may be held by buf cache */
361                     p->hold_count != 0) {       /* may be undergoing I/O */
362                         vm_page_wakeup(p);
363                         break;
364                 }
365                 if (p->queue - p->pc != PQ_INACTIVE) {
366                         if (p->queue - p->pc != PQ_ACTIVE ||
367                             (vmflush_flags & VM_PAGER_ALLOW_ACTIVE) == 0) {
368                                 vm_page_wakeup(p);
369                                 break;
370                         }
371                 }
372
373                 /*
374                  * Try to maintain page groupings in the cluster.
375                  */
376                 if (m->flags & PG_WINATCFLS)
377                         vm_page_flag_set(p, PG_WINATCFLS);
378                 else
379                         vm_page_flag_clear(p, PG_WINATCFLS);
380                 p->act_count = m->act_count;
381
382                 mc[is] = p;
383                 ++is;
384         }
385
386         vm_object_drop(object);
387
388         /*
389          * we allow reads during pageouts...
390          */
391         return vm_pageout_flush(&mc[ib], is - ib, vmflush_flags);
392 }
393
394 /*
395  * vm_pageout_flush() - launder the given pages
396  *
397  *      The given pages are laundered.  Note that we setup for the start of
398  *      I/O ( i.e. busy the page ), mark it read-only, and bump the object
399  *      reference count all in here rather then in the parent.  If we want
400  *      the parent to do more sophisticated things we may have to change
401  *      the ordering.
402  *
403  *      The pages in the array must be busied by the caller and will be
404  *      unbusied by this function.
405  */
406 int
407 vm_pageout_flush(vm_page_t *mc, int count, int vmflush_flags)
408 {
409         vm_object_t object;
410         int pageout_status[count];
411         int numpagedout = 0;
412         int i;
413
414         /*
415          * Initiate I/O.  Bump the vm_page_t->busy counter.
416          */
417         for (i = 0; i < count; i++) {
418                 KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
419                         ("vm_pageout_flush page %p index %d/%d: partially "
420                          "invalid page", mc[i], i, count));
421                 vm_page_io_start(mc[i]);
422         }
423
424         /*
425          * We must make the pages read-only.  This will also force the
426          * modified bit in the related pmaps to be cleared.  The pager
427          * cannot clear the bit for us since the I/O completion code
428          * typically runs from an interrupt.  The act of making the page
429          * read-only handles the case for us.
430          *
431          * Then we can unbusy the pages, we still hold a reference by virtue
432          * of our soft-busy.
433          */
434         for (i = 0; i < count; i++) {
435                 if (vmflush_flags & VM_PAGER_TRY_TO_CACHE)
436                         vm_page_protect(mc[i], VM_PROT_NONE);
437                 else
438                         vm_page_protect(mc[i], VM_PROT_READ);
439                 vm_page_wakeup(mc[i]);
440         }
441
442         object = mc[0]->object;
443         vm_object_pip_add(object, count);
444
445         vm_pager_put_pages(object, mc, count,
446                            (vmflush_flags |
447                             ((object == &kernel_object) ?
448                                 VM_PAGER_PUT_SYNC : 0)),
449                            pageout_status);
450
451         for (i = 0; i < count; i++) {
452                 vm_page_t mt = mc[i];
453
454                 switch (pageout_status[i]) {
455                 case VM_PAGER_OK:
456                         numpagedout++;
457                         break;
458                 case VM_PAGER_PEND:
459                         numpagedout++;
460                         break;
461                 case VM_PAGER_BAD:
462                         /*
463                          * Page outside of range of object. Right now we
464                          * essentially lose the changes by pretending it
465                          * worked.
466                          */
467                         vm_page_busy_wait(mt, FALSE, "pgbad");
468                         pmap_clear_modify(mt);
469                         vm_page_undirty(mt);
470                         vm_page_wakeup(mt);
471                         break;
472                 case VM_PAGER_ERROR:
473                 case VM_PAGER_FAIL:
474                         /*
475                          * A page typically cannot be paged out when we
476                          * have run out of swap.  We leave the page
477                          * marked inactive and will try to page it out
478                          * again later.
479                          *
480                          * Starvation of the active page list is used to
481                          * determine when the system is massively memory
482                          * starved.
483                          */
484                         break;
485                 case VM_PAGER_AGAIN:
486                         break;
487                 }
488
489                 /*
490                  * If not PENDing this was a synchronous operation and we
491                  * clean up after the I/O.  If it is PENDing the mess is
492                  * cleaned up asynchronously.
493                  *
494                  * Also nominally act on the caller's wishes if the caller
495                  * wants to try to really clean (cache or free) the page.
496                  *
497                  * Also nominally deactivate the page if the system is
498                  * memory-stressed.
499                  */
500                 if (pageout_status[i] != VM_PAGER_PEND) {
501                         vm_page_busy_wait(mt, FALSE, "pgouw");
502                         vm_page_io_finish(mt);
503                         if (vmflush_flags & VM_PAGER_TRY_TO_CACHE) {
504                                 vm_page_try_to_cache(mt);
505                         } else if (vm_page_count_severe()) {
506                                 vm_page_deactivate(mt);
507                                 vm_page_wakeup(mt);
508                         } else {
509                                 vm_page_wakeup(mt);
510                         }
511                         vm_object_pip_wakeup(object);
512                 }
513         }
514         return numpagedout;
515 }
516
517 #if !defined(NO_SWAPPING)
518
519 /*
520  * Callback function, page busied for us.  We must dispose of the busy
521  * condition.  Any related pmap pages may be held but will not be locked.
522  */
523 static
524 int
525 vm_pageout_mdp_callback(struct pmap_pgscan_info *info, vm_offset_t va,
526                         vm_page_t p)
527 {
528         int actcount;
529         int cleanit = 0;
530
531         /*
532          * Basic tests - There should never be a marker, and we can stop
533          *               once the RSS is below the required level.
534          */
535         KKASSERT((p->flags & PG_MARKER) == 0);
536         if (pmap_resident_tlnw_count(info->pmap) <= info->limit) {
537                 vm_page_wakeup(p);
538                 return(-1);
539         }
540
541         mycpu->gd_cnt.v_pdpages++;
542
543         if (p->wire_count || p->hold_count || (p->flags & PG_UNMANAGED)) {
544                 vm_page_wakeup(p);
545                 goto done;
546         }
547
548         ++info->actioncount;
549
550         /*
551          * Check if the page has been referened recently.  If it has,
552          * activate it and skip.
553          */
554         actcount = pmap_ts_referenced(p);
555         if (actcount) {
556                 vm_page_flag_set(p, PG_REFERENCED);
557         } else if (p->flags & PG_REFERENCED) {
558                 actcount = 1;
559         }
560
561         if (actcount) {
562                 if (p->queue - p->pc != PQ_ACTIVE) {
563                         vm_page_and_queue_spin_lock(p);
564                         if (p->queue - p->pc != PQ_ACTIVE) {
565                                 vm_page_and_queue_spin_unlock(p);
566                                 vm_page_activate(p);
567                         } else {
568                                 vm_page_and_queue_spin_unlock(p);
569                         }
570                 } else {
571                         p->act_count += actcount;
572                         if (p->act_count > ACT_MAX)
573                                 p->act_count = ACT_MAX;
574                 }
575                 vm_page_flag_clear(p, PG_REFERENCED);
576                 vm_page_wakeup(p);
577                 goto done;
578         }
579
580         /*
581          * Remove the page from this particular pmap.  Once we do this, our
582          * pmap scans will not see it again (unless it gets faulted in), so
583          * we must actively dispose of or deal with the page.
584          */
585         pmap_remove_specific(info->pmap, p);
586
587         /*
588          * If the page is not mapped to another process (i.e. as would be
589          * typical if this were a shared page from a library) then deactivate
590          * the page and clean it in two passes only.
591          *
592          * If the page hasn't been referenced since the last check, remove it
593          * from the pmap.  If it is no longer mapped, deactivate it
594          * immediately, accelerating the normal decline.
595          *
596          * Once the page has been removed from the pmap the RSS code no
597          * longer tracks it so we have to make sure that it is staged for
598          * potential flush action.
599          */
600         if ((p->flags & PG_MAPPED) == 0) {
601                 if (p->queue - p->pc == PQ_ACTIVE) {
602                         vm_page_deactivate(p);
603                 }
604                 if (p->queue - p->pc == PQ_INACTIVE) {
605                         cleanit = 1;
606                 }
607         }
608
609         /*
610          * Ok, try to fully clean the page and any nearby pages such that at
611          * least the requested page is freed or moved to the cache queue.
612          *
613          * We usually do this synchronously to allow us to get the page into
614          * the CACHE queue quickly, which will prevent memory exhaustion if
615          * a process with a memoryuse limit is running away.  However, the
616          * sysadmin may desire to set vm.swap_user_async which relaxes this
617          * and improves write performance.
618          */
619         if (cleanit) {
620                 long max_launder = 0x7FFF;
621                 long vnodes_skipped = 0;
622                 int vmflush_flags;
623                 struct vnode *vpfailed = NULL;
624
625                 info->offset = va;
626
627                 if (vm_pageout_memuse_mode >= 2) {
628                         vmflush_flags = VM_PAGER_TRY_TO_CACHE |
629                                         VM_PAGER_ALLOW_ACTIVE;
630                         if (swap_user_async == 0)
631                                 vmflush_flags |= VM_PAGER_PUT_SYNC;
632                         vm_page_flag_set(p, PG_WINATCFLS);
633                         info->cleancount +=
634                                 vm_pageout_page(p, &max_launder,
635                                                 &vnodes_skipped,
636                                                 &vpfailed, 1, vmflush_flags);
637                 } else {
638                         vm_page_wakeup(p);
639                         ++info->cleancount;
640                 }
641         } else {
642                 vm_page_wakeup(p);
643         }
644
645         /*
646          * Must be at end to avoid SMP races.
647          */
648 done:
649         lwkt_user_yield();
650         return 0;
651 }
652
653 /*
654  * Deactivate some number of pages in a map due to set RLIMIT_RSS limits.
655  * that is relatively difficult to do.  We try to keep track of where we
656  * left off last time to reduce scan overhead.
657  *
658  * Called when vm_pageout_memuse_mode is >= 1.
659  */
660 void
661 vm_pageout_map_deactivate_pages(vm_map_t map, vm_pindex_t limit)
662 {
663         vm_offset_t pgout_offset;
664         struct pmap_pgscan_info info;
665         int retries = 3;
666
667         pgout_offset = map->pgout_offset;
668 again:
669 #if 0
670         kprintf("%016jx ", pgout_offset);
671 #endif
672         if (pgout_offset < VM_MIN_USER_ADDRESS)
673                 pgout_offset = VM_MIN_USER_ADDRESS;
674         if (pgout_offset >= VM_MAX_USER_ADDRESS)
675                 pgout_offset = 0;
676         info.pmap = vm_map_pmap(map);
677         info.limit = limit;
678         info.beg_addr = pgout_offset;
679         info.end_addr = VM_MAX_USER_ADDRESS;
680         info.callback = vm_pageout_mdp_callback;
681         info.cleancount = 0;
682         info.actioncount = 0;
683         info.busycount = 0;
684
685         pmap_pgscan(&info);
686         pgout_offset = info.offset;
687 #if 0
688         kprintf("%016jx %08lx %08lx\n", pgout_offset,
689                 info.cleancount, info.actioncount);
690 #endif
691
692         if (pgout_offset != VM_MAX_USER_ADDRESS &&
693             pmap_resident_tlnw_count(vm_map_pmap(map)) > limit) {
694                 goto again;
695         } else if (retries &&
696                    pmap_resident_tlnw_count(vm_map_pmap(map)) > limit) {
697                 --retries;
698                 goto again;
699         }
700         map->pgout_offset = pgout_offset;
701 }
702 #endif
703
704 /*
705  * Called when the pageout scan wants to free a page.  We no longer
706  * try to cycle the vm_object here with a reference & dealloc, which can
707  * cause a non-trivial object collapse in a critical path.
708  *
709  * It is unclear why we cycled the ref_count in the past, perhaps to try
710  * to optimize shadow chain collapses but I don't quite see why it would
711  * be necessary.  An OBJ_DEAD object should terminate any and all vm_pages
712  * synchronously and not have to be kicked-start.
713  */
714 static void
715 vm_pageout_page_free(vm_page_t m) 
716 {
717         vm_page_protect(m, VM_PROT_NONE);
718         vm_page_free(m);
719 }
720
721 /*
722  * vm_pageout_scan does the dirty work for the pageout daemon.
723  */
724 struct vm_pageout_scan_info {
725         struct proc *bigproc;
726         vm_offset_t bigsize;
727 };
728
729 static int vm_pageout_scan_callback(struct proc *p, void *data);
730
731 /*
732  * Scan inactive queue
733  *
734  * WARNING! Can be called from two pagedaemon threads simultaneously.
735  */
736 static int
737 vm_pageout_scan_inactive(int pass, int q, long avail_shortage,
738                          long *vnodes_skipped)
739 {
740         vm_page_t m;
741         struct vm_page marker;
742         struct vnode *vpfailed;         /* warning, allowed to be stale */
743         int maxscan;
744         long delta = 0;
745         long max_launder;
746         int isep;
747
748         isep = (curthread == emergpager);
749
750         /*
751          * Start scanning the inactive queue for pages we can move to the
752          * cache or free.  The scan will stop when the target is reached or
753          * we have scanned the entire inactive queue.  Note that m->act_count
754          * is not used to form decisions for the inactive queue, only for the
755          * active queue.
756          *
757          * max_launder limits the number of dirty pages we flush per scan.
758          * For most systems a smaller value (16 or 32) is more robust under
759          * extreme memory and disk pressure because any unnecessary writes
760          * to disk can result in extreme performance degredation.  However,
761          * systems with excessive dirty pages (especially when MAP_NOSYNC is
762          * used) will die horribly with limited laundering.  If the pageout
763          * daemon cannot clean enough pages in the first pass, we let it go
764          * all out in succeeding passes.
765          *
766          * NOTE!  THE EMERGENCY PAGER (isep) DOES NOT LAUNDER VNODE-BACKED
767          *        PAGES.
768          */
769         if ((max_launder = vm_max_launder) <= 1)
770                 max_launder = 1;
771         if (pass)
772                 max_launder = 10000;
773
774         /*
775          * Initialize our marker
776          */
777         bzero(&marker, sizeof(marker));
778         marker.flags = PG_FICTITIOUS | PG_MARKER;
779         marker.busy_count = PBUSY_LOCKED;
780         marker.queue = PQ_INACTIVE + q;
781         marker.pc = q;
782         marker.wire_count = 1;
783
784         /*
785          * Inactive queue scan.
786          *
787          * NOTE: The vm_page must be spinlocked before the queue to avoid
788          *       deadlocks, so it is easiest to simply iterate the loop
789          *       with the queue unlocked at the top.
790          */
791         vpfailed = NULL;
792
793         vm_page_queues_spin_lock(PQ_INACTIVE + q);
794         TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE + q].pl, &marker, pageq);
795         maxscan = vm_page_queues[PQ_INACTIVE + q].lcnt;
796
797         /*
798          * Queue locked at top of loop to avoid stack marker issues.
799          */
800         while ((m = TAILQ_NEXT(&marker, pageq)) != NULL &&
801                maxscan-- > 0 && avail_shortage - delta > 0)
802         {
803                 int count;
804
805                 KKASSERT(m->queue == PQ_INACTIVE + q);
806                 TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE + q].pl,
807                              &marker, pageq);
808                 TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE + q].pl, m,
809                                    &marker, pageq);
810                 mycpu->gd_cnt.v_pdpages++;
811
812                 /*
813                  * Skip marker pages (atomic against other markers to avoid
814                  * infinite hop-over scans).
815                  */
816                 if (m->flags & PG_MARKER)
817                         continue;
818
819                 /*
820                  * Try to busy the page.  Don't mess with pages which are
821                  * already busy or reorder them in the queue.
822                  */
823                 if (vm_page_busy_try(m, TRUE))
824                         continue;
825
826                 /*
827                  * Remaining operations run with the page busy and neither
828                  * the page or the queue will be spin-locked.
829                  */
830                 KKASSERT(m->queue == PQ_INACTIVE + q);
831                 vm_page_queues_spin_unlock(PQ_INACTIVE + q);
832
833                 /*
834                  * The emergency pager runs when the primary pager gets
835                  * stuck, which typically means the primary pager deadlocked
836                  * on a vnode-backed page.  Therefore, the emergency pager
837                  * must skip any complex objects.
838                  *
839                  * We disallow VNODEs unless they are VCHR whos device ops
840                  * does not flag D_NOEMERGPGR.
841                  */
842                 if (isep && m->object) {
843                         struct vnode *vp;
844
845                         switch(m->object->type) {
846                         case OBJT_DEFAULT:
847                         case OBJT_SWAP:
848                                 /*
849                                  * Allow anonymous memory and assume that
850                                  * swap devices are not complex, since its
851                                  * kinda worthless if we can't swap out dirty
852                                  * anonymous pages.
853                                  */
854                                 break;
855                         case OBJT_VNODE:
856                                 /*
857                                  * Allow VCHR device if the D_NOEMERGPGR
858                                  * flag is not set, deny other vnode types
859                                  * as being too complex.
860                                  */
861                                 vp = m->object->handle;
862                                 if (vp && vp->v_type == VCHR &&
863                                     vp->v_rdev && vp->v_rdev->si_ops &&
864                                     (vp->v_rdev->si_ops->head.flags &
865                                      D_NOEMERGPGR) == 0) {
866                                         break;
867                                 }
868                                 /* Deny - fall through */
869                         default:
870                                 /*
871                                  * Deny
872                                  */
873                                 vm_page_wakeup(m);
874                                 vm_page_queues_spin_lock(PQ_INACTIVE + q);
875                                 lwkt_yield();
876                                 continue;
877                         }
878                 }
879
880                 /*
881                  * Try to pageout the page and perhaps other nearby pages.
882                  */
883                 count = vm_pageout_page(m, &max_launder, vnodes_skipped,
884                                         &vpfailed, pass, 0);
885                 delta += count;
886
887                 /*
888                  * Systems with a ton of memory can wind up with huge
889                  * deactivation counts.  Because the inactive scan is
890                  * doing a lot of flushing, the combination can result
891                  * in excessive paging even in situations where other
892                  * unrelated threads free up sufficient VM.
893                  *
894                  * To deal with this we abort the nominal active->inactive
895                  * scan before we hit the inactive target when free+cache
896                  * levels have reached a reasonable target.
897                  *
898                  * When deciding to stop early we need to add some slop to
899                  * the test and we need to return full completion to the caller
900                  * to prevent the caller from thinking there is something
901                  * wrong and issuing a low-memory+swap warning or pkill.
902                  *
903                  * A deficit forces paging regardless of the state of the
904                  * VM page queues (used for RSS enforcement).
905                  */
906                 lwkt_yield();
907                 vm_page_queues_spin_lock(PQ_INACTIVE + q);
908                 if (vm_paging_target() < -vm_max_launder) {
909                         /*
910                          * Stopping early, return full completion to caller.
911                          */
912                         if (delta < avail_shortage)
913                                 delta = avail_shortage;
914                         break;
915                 }
916         }
917
918         /* page queue still spin-locked */
919         TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE + q].pl, &marker, pageq);
920         vm_page_queues_spin_unlock(PQ_INACTIVE + q);
921
922         return (delta);
923 }
924
925 /*
926  * Pageout the specified page, return the total number of pages paged out
927  * (this routine may cluster).
928  *
929  * The page must be busied and soft-busied by the caller and will be disposed
930  * of by this function.
931  */
932 static int
933 vm_pageout_page(vm_page_t m, long *max_launderp, long *vnodes_skippedp,
934                 struct vnode **vpfailedp, int pass, int vmflush_flags)
935 {
936         vm_object_t object;
937         int actcount;
938         int count = 0;
939
940         /*
941          * Wiring no longer removes a page from its queue.  The last unwiring
942          * will requeue the page.  Obviously wired pages cannot be paged out
943          * so unqueue it and return.
944          */
945         if (m->wire_count) {
946                 vm_page_unqueue_nowakeup(m);
947                 vm_page_wakeup(m);
948                 return 0;
949         }
950
951         /*
952          * A held page may be undergoing I/O, so skip it.
953          */
954         if (m->hold_count) {
955                 vm_page_and_queue_spin_lock(m);
956                 if (m->queue - m->pc == PQ_INACTIVE) {
957                         TAILQ_REMOVE(
958                                 &vm_page_queues[m->queue].pl, m, pageq);
959                         TAILQ_INSERT_TAIL(
960                                 &vm_page_queues[m->queue].pl, m, pageq);
961                         ++vm_swapcache_inactive_heuristic;
962                 }
963                 vm_page_and_queue_spin_unlock(m);
964                 vm_page_wakeup(m);
965                 return 0;
966         }
967
968         if (m->object == NULL || m->object->ref_count == 0) {
969                 /*
970                  * If the object is not being used, we ignore previous
971                  * references.
972                  */
973                 vm_page_flag_clear(m, PG_REFERENCED);
974                 pmap_clear_reference(m);
975                 /* fall through to end */
976         } else if (((m->flags & PG_REFERENCED) == 0) &&
977                     (actcount = pmap_ts_referenced(m))) {
978                 /*
979                  * Otherwise, if the page has been referenced while
980                  * in the inactive queue, we bump the "activation
981                  * count" upwards, making it less likely that the
982                  * page will be added back to the inactive queue
983                  * prematurely again.  Here we check the page tables
984                  * (or emulated bits, if any), given the upper level
985                  * VM system not knowing anything about existing
986                  * references.
987                  */
988                 vm_page_activate(m);
989                 m->act_count += (actcount + ACT_ADVANCE);
990                 vm_page_wakeup(m);
991                 return 0;
992         }
993
994         /*
995          * (m) is still busied.
996          *
997          * If the upper level VM system knows about any page
998          * references, we activate the page.  We also set the
999          * "activation count" higher than normal so that we will less
1000          * likely place pages back onto the inactive queue again.
1001          */
1002         if ((m->flags & PG_REFERENCED) != 0) {
1003                 vm_page_flag_clear(m, PG_REFERENCED);
1004                 actcount = pmap_ts_referenced(m);
1005                 vm_page_activate(m);
1006                 m->act_count += (actcount + ACT_ADVANCE + 1);
1007                 vm_page_wakeup(m);
1008                 return 0;
1009         }
1010
1011         /*
1012          * If the upper level VM system doesn't know anything about
1013          * the page being dirty, we have to check for it again.  As
1014          * far as the VM code knows, any partially dirty pages are
1015          * fully dirty.
1016          *
1017          * Pages marked PG_WRITEABLE may be mapped into the user
1018          * address space of a process running on another cpu.  A
1019          * user process (without holding the MP lock) running on
1020          * another cpu may be able to touch the page while we are
1021          * trying to remove it.  vm_page_cache() will handle this
1022          * case for us.
1023          */
1024         if (m->dirty == 0) {
1025                 vm_page_test_dirty(m);
1026         } else {
1027                 vm_page_dirty(m);
1028         }
1029
1030         if (m->valid == 0 && (m->flags & PG_NEED_COMMIT) == 0) {
1031                 /*
1032                  * Invalid pages can be easily freed
1033                  */
1034                 vm_pageout_page_free(m);
1035                 mycpu->gd_cnt.v_dfree++;
1036                 ++count;
1037         } else if (m->dirty == 0 && (m->flags & PG_NEED_COMMIT) == 0) {
1038                 /*
1039                  * Clean pages can be placed onto the cache queue.
1040                  * This effectively frees them.
1041                  */
1042                 vm_page_cache(m);
1043                 ++count;
1044         } else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) {
1045                 /*
1046                  * Dirty pages need to be paged out, but flushing
1047                  * a page is extremely expensive verses freeing
1048                  * a clean page.  Rather then artificially limiting
1049                  * the number of pages we can flush, we instead give
1050                  * dirty pages extra priority on the inactive queue
1051                  * by forcing them to be cycled through the queue
1052                  * twice before being flushed, after which the
1053                  * (now clean) page will cycle through once more
1054                  * before being freed.  This significantly extends
1055                  * the thrash point for a heavily loaded machine.
1056                  */
1057                 vm_page_flag_set(m, PG_WINATCFLS);
1058                 vm_page_and_queue_spin_lock(m);
1059                 if (m->queue - m->pc == PQ_INACTIVE) {
1060                         TAILQ_REMOVE(
1061                                 &vm_page_queues[m->queue].pl, m, pageq);
1062                         TAILQ_INSERT_TAIL(
1063                                 &vm_page_queues[m->queue].pl, m, pageq);
1064                         ++vm_swapcache_inactive_heuristic;
1065                 }
1066                 vm_page_and_queue_spin_unlock(m);
1067                 vm_page_wakeup(m);
1068         } else if (*max_launderp > 0) {
1069                 /*
1070                  * We always want to try to flush some dirty pages if
1071                  * we encounter them, to keep the system stable.
1072                  * Normally this number is small, but under extreme
1073                  * pressure where there are insufficient clean pages
1074                  * on the inactive queue, we may have to go all out.
1075                  */
1076                 int swap_pageouts_ok;
1077                 struct vnode *vp = NULL;
1078
1079                 swap_pageouts_ok = 0;
1080                 object = m->object;
1081                 if (object &&
1082                     (object->type != OBJT_SWAP) &&
1083                     (object->type != OBJT_DEFAULT)) {
1084                         swap_pageouts_ok = 1;
1085                 } else {
1086                         swap_pageouts_ok = !(defer_swap_pageouts ||
1087                                              disable_swap_pageouts);
1088                         swap_pageouts_ok |= (!disable_swap_pageouts &&
1089                                              defer_swap_pageouts &&
1090                                              vm_page_count_min(0));
1091                 }
1092
1093                 /*
1094                  * We don't bother paging objects that are "dead".
1095                  * Those objects are in a "rundown" state.
1096                  */
1097                 if (!swap_pageouts_ok ||
1098                     (object == NULL) ||
1099                     (object->flags & OBJ_DEAD)) {
1100                         vm_page_and_queue_spin_lock(m);
1101                         if (m->queue - m->pc == PQ_INACTIVE) {
1102                                 TAILQ_REMOVE(
1103                                     &vm_page_queues[m->queue].pl,
1104                                     m, pageq);
1105                                 TAILQ_INSERT_TAIL(
1106                                     &vm_page_queues[m->queue].pl,
1107                                     m, pageq);
1108                                 ++vm_swapcache_inactive_heuristic;
1109                         }
1110                         vm_page_and_queue_spin_unlock(m);
1111                         vm_page_wakeup(m);
1112                         return 0;
1113                 }
1114
1115                 /*
1116                  * (m) is still busied.
1117                  *
1118                  * The object is already known NOT to be dead.   It
1119                  * is possible for the vget() to block the whole
1120                  * pageout daemon, but the new low-memory handling
1121                  * code should prevent it.
1122                  *
1123                  * The previous code skipped locked vnodes and, worse,
1124                  * reordered pages in the queue.  This results in
1125                  * completely non-deterministic operation because,
1126                  * quite often, a vm_fault has initiated an I/O and
1127                  * is holding a locked vnode at just the point where
1128                  * the pageout daemon is woken up.
1129                  *
1130                  * We can't wait forever for the vnode lock, we might
1131                  * deadlock due to a vn_read() getting stuck in
1132                  * vm_wait while holding this vnode.  We skip the
1133                  * vnode if we can't get it in a reasonable amount
1134                  * of time.
1135                  *
1136                  * vpfailed is used to (try to) avoid the case where
1137                  * a large number of pages are associated with a
1138                  * locked vnode, which could cause the pageout daemon
1139                  * to stall for an excessive amount of time.
1140                  */
1141                 if (object->type == OBJT_VNODE) {
1142                         int flags;
1143
1144                         vp = object->handle;
1145                         flags = LK_EXCLUSIVE;
1146                         if (vp == *vpfailedp)
1147                                 flags |= LK_NOWAIT;
1148                         else
1149                                 flags |= LK_TIMELOCK;
1150                         vm_page_hold(m);
1151                         vm_page_wakeup(m);
1152
1153                         /*
1154                          * We have unbusied (m) temporarily so we can
1155                          * acquire the vp lock without deadlocking.
1156                          * (m) is held to prevent destruction.
1157                          */
1158                         if (vget(vp, flags) != 0) {
1159                                 *vpfailedp = vp;
1160                                 ++pageout_lock_miss;
1161                                 if (object->flags & OBJ_MIGHTBEDIRTY)
1162                                             ++*vnodes_skippedp;
1163                                 vm_page_unhold(m);
1164                                 return 0;
1165                         }
1166
1167                         /*
1168                          * The page might have been moved to another
1169                          * queue during potential blocking in vget()
1170                          * above.  The page might have been freed and
1171                          * reused for another vnode.  The object might
1172                          * have been reused for another vnode.
1173                          */
1174                         if (m->queue - m->pc != PQ_INACTIVE ||
1175                             m->object != object ||
1176                             object->handle != vp) {
1177                                 if (object->flags & OBJ_MIGHTBEDIRTY)
1178                                         ++*vnodes_skippedp;
1179                                 vput(vp);
1180                                 vm_page_unhold(m);
1181                                 return 0;
1182                         }
1183
1184                         /*
1185                          * The page may have been busied during the
1186                          * blocking in vput();  We don't move the
1187                          * page back onto the end of the queue so that
1188                          * statistics are more correct if we don't.
1189                          */
1190                         if (vm_page_busy_try(m, TRUE)) {
1191                                 vput(vp);
1192                                 vm_page_unhold(m);
1193                                 return 0;
1194                         }
1195                         vm_page_unhold(m);
1196
1197                         /*
1198                          * If it was wired while we didn't own it.
1199                          */
1200                         if (m->wire_count) {
1201                                 vm_page_unqueue_nowakeup(m);
1202                                 vput(vp);
1203                                 vm_page_wakeup(m);
1204                                 return 0;
1205                         }
1206
1207                         /*
1208                          * (m) is busied again
1209                          *
1210                          * We own the busy bit and remove our hold
1211                          * bit.  If the page is still held it
1212                          * might be undergoing I/O, so skip it.
1213                          */
1214                         if (m->hold_count) {
1215                                 vm_page_and_queue_spin_lock(m);
1216                                 if (m->queue - m->pc == PQ_INACTIVE) {
1217                                         TAILQ_REMOVE(&vm_page_queues[m->queue].pl, m, pageq);
1218                                         TAILQ_INSERT_TAIL(&vm_page_queues[m->queue].pl, m, pageq);
1219                                         ++vm_swapcache_inactive_heuristic;
1220                                 }
1221                                 vm_page_and_queue_spin_unlock(m);
1222                                 if (object->flags & OBJ_MIGHTBEDIRTY)
1223                                         ++*vnodes_skippedp;
1224                                 vm_page_wakeup(m);
1225                                 vput(vp);
1226                                 return 0;
1227                         }
1228                         /* (m) is left busied as we fall through */
1229                 }
1230
1231                 /*
1232                  * page is busy and not held here.
1233                  *
1234                  * If a page is dirty, then it is either being washed
1235                  * (but not yet cleaned) or it is still in the
1236                  * laundry.  If it is still in the laundry, then we
1237                  * start the cleaning operation.
1238                  *
1239                  * decrement inactive_shortage on success to account
1240                  * for the (future) cleaned page.  Otherwise we
1241                  * could wind up laundering or cleaning too many
1242                  * pages.
1243                  *
1244                  * NOTE: Cleaning the page here does not cause
1245                  *       force_deficit to be adjusted, because the
1246                  *       page is not being freed or moved to the
1247                  *       cache.
1248                  */
1249                 count = vm_pageout_clean_helper(m, vmflush_flags);
1250                 *max_launderp -= count;
1251
1252                 /*
1253                  * Clean ate busy, page no longer accessible
1254                  */
1255                 if (vp != NULL)
1256                         vput(vp);
1257         } else {
1258                 vm_page_wakeup(m);
1259         }
1260         return count;
1261 }
1262
1263 /*
1264  * Scan active queue
1265  *
1266  * WARNING! Can be called from two pagedaemon threads simultaneously.
1267  */
1268 static int
1269 vm_pageout_scan_active(int pass, int q,
1270                        long avail_shortage, long inactive_shortage,
1271                        long *recycle_countp)
1272 {
1273         struct vm_page marker;
1274         vm_page_t m;
1275         int actcount;
1276         long delta = 0;
1277         long maxscan;
1278         int isep;
1279
1280         isep = (curthread == emergpager);
1281
1282         /*
1283          * We want to move pages from the active queue to the inactive
1284          * queue to get the inactive queue to the inactive target.  If
1285          * we still have a page shortage from above we try to directly free
1286          * clean pages instead of moving them.
1287          *
1288          * If we do still have a shortage we keep track of the number of
1289          * pages we free or cache (recycle_count) as a measure of thrashing
1290          * between the active and inactive queues.
1291          *
1292          * If we were able to completely satisfy the free+cache targets
1293          * from the inactive pool we limit the number of pages we move
1294          * from the active pool to the inactive pool to 2x the pages we
1295          * had removed from the inactive pool (with a minimum of 1/5 the
1296          * inactive target).  If we were not able to completely satisfy
1297          * the free+cache targets we go for the whole target aggressively.
1298          *
1299          * NOTE: Both variables can end up negative.
1300          * NOTE: We are still in a critical section.
1301          *
1302          * NOTE!  THE EMERGENCY PAGER (isep) DOES NOT LAUNDER VNODE-BACKED
1303          *        PAGES.
1304          */
1305
1306         bzero(&marker, sizeof(marker));
1307         marker.flags = PG_FICTITIOUS | PG_MARKER;
1308         marker.busy_count = PBUSY_LOCKED;
1309         marker.queue = PQ_ACTIVE + q;
1310         marker.pc = q;
1311         marker.wire_count = 1;
1312
1313         vm_page_queues_spin_lock(PQ_ACTIVE + q);
1314         TAILQ_INSERT_HEAD(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1315         maxscan = vm_page_queues[PQ_ACTIVE + q].lcnt;
1316
1317         /*
1318          * Queue locked at top of loop to avoid stack marker issues.
1319          */
1320         while ((m = TAILQ_NEXT(&marker, pageq)) != NULL &&
1321                maxscan-- > 0 && (avail_shortage - delta > 0 ||
1322                                 inactive_shortage > 0))
1323         {
1324                 KKASSERT(m->queue == PQ_ACTIVE + q);
1325                 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl,
1326                              &marker, pageq);
1327                 TAILQ_INSERT_AFTER(&vm_page_queues[PQ_ACTIVE + q].pl, m,
1328                                    &marker, pageq);
1329
1330                 /*
1331                  * Skip marker pages (atomic against other markers to avoid
1332                  * infinite hop-over scans).
1333                  */
1334                 if (m->flags & PG_MARKER)
1335                         continue;
1336
1337                 /*
1338                  * Try to busy the page.  Don't mess with pages which are
1339                  * already busy or reorder them in the queue.
1340                  */
1341                 if (vm_page_busy_try(m, TRUE))
1342                         continue;
1343
1344                 /*
1345                  * Remaining operations run with the page busy and neither
1346                  * the page or the queue will be spin-locked.
1347                  */
1348                 KKASSERT(m->queue == PQ_ACTIVE + q);
1349                 vm_page_queues_spin_unlock(PQ_ACTIVE + q);
1350
1351 #if 0
1352                 /*
1353                  * Don't deactivate pages that are held, even if we can
1354                  * busy them.  (XXX why not?)
1355                  */
1356                 if (m->hold_count) {
1357                         vm_page_and_queue_spin_lock(m);
1358                         if (m->queue - m->pc == PQ_ACTIVE) {
1359                                 TAILQ_REMOVE(
1360                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1361                                         m, pageq);
1362                                 TAILQ_INSERT_TAIL(
1363                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1364                                         m, pageq);
1365                         }
1366                         vm_page_and_queue_spin_unlock(m);
1367                         vm_page_wakeup(m);
1368                         goto next;
1369                 }
1370 #endif
1371                 /*
1372                  * We can just remove wired pages from the queue
1373                  */
1374                 if (m->wire_count) {
1375                         vm_page_unqueue_nowakeup(m);
1376                         vm_page_wakeup(m);
1377                         goto next;
1378                 }
1379
1380                 /*
1381                  * The emergency pager ignores vnode-backed pages as these
1382                  * are the pages that probably bricked the main pager.
1383                  */
1384                 if (isep && m->object && m->object->type == OBJT_VNODE) {
1385                         vm_page_and_queue_spin_lock(m);
1386                         if (m->queue - m->pc == PQ_ACTIVE) {
1387                                 TAILQ_REMOVE(
1388                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1389                                         m, pageq);
1390                                 TAILQ_INSERT_TAIL(
1391                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1392                                         m, pageq);
1393                         }
1394                         vm_page_and_queue_spin_unlock(m);
1395                         vm_page_wakeup(m);
1396                         goto next;
1397                 }
1398
1399                 /*
1400                  * The count for pagedaemon pages is done after checking the
1401                  * page for eligibility...
1402                  */
1403                 mycpu->gd_cnt.v_pdpages++;
1404
1405                 /*
1406                  * Check to see "how much" the page has been used and clear
1407                  * the tracking access bits.  If the object has no references
1408                  * don't bother paying the expense.
1409                  */
1410                 actcount = 0;
1411                 if (m->object && m->object->ref_count != 0) {
1412                         if (m->flags & PG_REFERENCED)
1413                                 ++actcount;
1414                         actcount += pmap_ts_referenced(m);
1415                         if (actcount) {
1416                                 m->act_count += ACT_ADVANCE + actcount;
1417                                 if (m->act_count > ACT_MAX)
1418                                         m->act_count = ACT_MAX;
1419                         }
1420                 }
1421                 vm_page_flag_clear(m, PG_REFERENCED);
1422
1423                 /*
1424                  * actcount is only valid if the object ref_count is non-zero.
1425                  * If the page does not have an object, actcount will be zero.
1426                  */
1427                 if (actcount && m->object->ref_count != 0) {
1428                         vm_page_and_queue_spin_lock(m);
1429                         if (m->queue - m->pc == PQ_ACTIVE) {
1430                                 TAILQ_REMOVE(
1431                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1432                                         m, pageq);
1433                                 TAILQ_INSERT_TAIL(
1434                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1435                                         m, pageq);
1436                         }
1437                         vm_page_and_queue_spin_unlock(m);
1438                         vm_page_wakeup(m);
1439                 } else {
1440                         switch(m->object->type) {
1441                         case OBJT_DEFAULT:
1442                         case OBJT_SWAP:
1443                                 m->act_count -= min(m->act_count,
1444                                                     vm_anonmem_decline);
1445                                 break;
1446                         default:
1447                                 m->act_count -= min(m->act_count,
1448                                                     vm_filemem_decline);
1449                                 break;
1450                         }
1451                         if (vm_pageout_algorithm ||
1452                             (m->object == NULL) ||
1453                             (m->object && (m->object->ref_count == 0)) ||
1454                             m->act_count < pass + 1
1455                         ) {
1456                                 /*
1457                                  * Deactivate the page.  If we had a
1458                                  * shortage from our inactive scan try to
1459                                  * free (cache) the page instead.
1460                                  *
1461                                  * Don't just blindly cache the page if
1462                                  * we do not have a shortage from the
1463                                  * inactive scan, that could lead to
1464                                  * gigabytes being moved.
1465                                  */
1466                                 --inactive_shortage;
1467                                 if (avail_shortage - delta > 0 ||
1468                                     (m->object && (m->object->ref_count == 0)))
1469                                 {
1470                                         if (avail_shortage - delta > 0)
1471                                                 ++*recycle_countp;
1472                                         vm_page_protect(m, VM_PROT_NONE);
1473                                         if (m->dirty == 0 &&
1474                                             (m->flags & PG_NEED_COMMIT) == 0 &&
1475                                             avail_shortage - delta > 0) {
1476                                                 vm_page_cache(m);
1477                                         } else {
1478                                                 vm_page_deactivate(m);
1479                                                 vm_page_wakeup(m);
1480                                         }
1481                                 } else {
1482                                         vm_page_deactivate(m);
1483                                         vm_page_wakeup(m);
1484                                 }
1485                                 ++delta;
1486                         } else {
1487                                 vm_page_and_queue_spin_lock(m);
1488                                 if (m->queue - m->pc == PQ_ACTIVE) {
1489                                         TAILQ_REMOVE(
1490                                             &vm_page_queues[PQ_ACTIVE + q].pl,
1491                                             m, pageq);
1492                                         TAILQ_INSERT_TAIL(
1493                                             &vm_page_queues[PQ_ACTIVE + q].pl,
1494                                             m, pageq);
1495                                 }
1496                                 vm_page_and_queue_spin_unlock(m);
1497                                 vm_page_wakeup(m);
1498                         }
1499                 }
1500 next:
1501                 lwkt_yield();
1502                 vm_page_queues_spin_lock(PQ_ACTIVE + q);
1503         }
1504
1505         /*
1506          * Clean out our local marker.
1507          *
1508          * Page queue still spin-locked.
1509          */
1510         TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1511         vm_page_queues_spin_unlock(PQ_ACTIVE + q);
1512
1513         return (delta);
1514 }
1515
1516 /*
1517  * The number of actually free pages can drop down to v_free_reserved,
1518  * we try to build the free count back above v_free_min.  Note that
1519  * vm_paging_needed() also returns TRUE if v_free_count is not at
1520  * least v_free_min so that is the minimum we must build the free
1521  * count to.
1522  *
1523  * We use a slightly higher target to improve hysteresis,
1524  * ((v_free_target + v_free_min) / 2).  Since v_free_target
1525  * is usually the same as v_cache_min this maintains about
1526  * half the pages in the free queue as are in the cache queue,
1527  * providing pretty good pipelining for pageout operation.
1528  *
1529  * The system operator can manipulate vm.v_cache_min and
1530  * vm.v_free_target to tune the pageout demon.  Be sure
1531  * to keep vm.v_free_min < vm.v_free_target.
1532  *
1533  * Note that the original paging target is to get at least
1534  * (free_min + cache_min) into (free + cache).  The slightly
1535  * higher target will shift additional pages from cache to free
1536  * without effecting the original paging target in order to
1537  * maintain better hysteresis and not have the free count always
1538  * be dead-on v_free_min.
1539  *
1540  * NOTE: we are still in a critical section.
1541  *
1542  * Pages moved from PQ_CACHE to totally free are not counted in the
1543  * pages_freed counter.
1544  *
1545  * WARNING! Can be called from two pagedaemon threads simultaneously.
1546  */
1547 static void
1548 vm_pageout_scan_cache(long avail_shortage, int pass,
1549                       long vnodes_skipped, long recycle_count)
1550 {
1551         static int lastkillticks;
1552         struct vm_pageout_scan_info info;
1553         vm_page_t m;
1554         int isep;
1555
1556         isep = (curthread == emergpager);
1557
1558         while (vmstats.v_free_count <
1559                (vmstats.v_free_min + vmstats.v_free_target) / 2) {
1560                 /*
1561                  * This steals some code from vm/vm_page.c
1562                  *
1563                  * Create two rovers and adjust the code to reduce
1564                  * chances of them winding up at the same index (which
1565                  * can cause a lot of contention).
1566                  */
1567                 static int cache_rover[2] = { 0, PQ_L2_MASK / 2 };
1568
1569                 if (((cache_rover[0] ^ cache_rover[1]) & PQ_L2_MASK) == 0)
1570                         goto next_rover;
1571
1572                 m = vm_page_list_find(PQ_CACHE, cache_rover[isep] & PQ_L2_MASK);
1573                 if (m == NULL)
1574                         break;
1575
1576                 /*
1577                  * If the busy attempt fails we can still deactivate the page.
1578                  */
1579                 /* page is returned removed from its queue and spinlocked */
1580                 if (vm_page_busy_try(m, TRUE)) {
1581                         vm_page_deactivate_locked(m);
1582                         vm_page_spin_unlock(m);
1583                         continue;
1584                 }
1585                 vm_page_spin_unlock(m);
1586                 pagedaemon_wakeup();
1587                 lwkt_yield();
1588
1589                 /*
1590                  * Remaining operations run with the page busy and neither
1591                  * the page or the queue will be spin-locked.
1592                  */
1593                 if ((m->flags & (PG_UNMANAGED | PG_NEED_COMMIT)) ||
1594                     m->hold_count ||
1595                     m->wire_count) {
1596                         vm_page_deactivate(m);
1597                         vm_page_wakeup(m);
1598                         continue;
1599                 }
1600                 KKASSERT((m->flags & PG_MAPPED) == 0);
1601                 KKASSERT(m->dirty == 0);
1602                 vm_pageout_page_free(m);
1603                 mycpu->gd_cnt.v_dfree++;
1604 next_rover:
1605                 if (isep)
1606                         cache_rover[1] -= PQ_PRIME2;
1607                 else
1608                         cache_rover[0] += PQ_PRIME2;
1609         }
1610
1611 #if !defined(NO_SWAPPING)
1612         /*
1613          * Idle process swapout -- run once per second.
1614          */
1615         if (vm_swap_idle_enabled) {
1616                 static time_t lsec;
1617                 if (time_uptime != lsec) {
1618                         atomic_set_int(&vm_pageout_req_swapout, VM_SWAP_IDLE);
1619                         vm_req_vmdaemon();
1620                         lsec = time_uptime;
1621                 }
1622         }
1623 #endif
1624                 
1625         /*
1626          * If we didn't get enough free pages, and we have skipped a vnode
1627          * in a writeable object, wakeup the sync daemon.  And kick swapout
1628          * if we did not get enough free pages.
1629          */
1630         if (vm_paging_target() > 0) {
1631                 if (vnodes_skipped && vm_page_count_min(0))
1632                         speedup_syncer(NULL);
1633 #if !defined(NO_SWAPPING)
1634                 if (vm_swap_enabled && vm_page_count_target()) {
1635                         atomic_set_int(&vm_pageout_req_swapout, VM_SWAP_NORMAL);
1636                         vm_req_vmdaemon();
1637                 }
1638 #endif
1639         }
1640
1641         /*
1642          * Handle catastrophic conditions.  Under good conditions we should
1643          * be at the target, well beyond our minimum.  If we could not even
1644          * reach our minimum the system is under heavy stress.  But just being
1645          * under heavy stress does not trigger process killing.
1646          *
1647          * We consider ourselves to have run out of memory if the swap pager
1648          * is full and avail_shortage is still positive.  The secondary check
1649          * ensures that we do not kill processes if the instantanious
1650          * availability is good, even if the pageout demon pass says it
1651          * couldn't get to the target.
1652          *
1653          * NOTE!  THE EMERGENCY PAGER (isep) DOES NOT HANDLE SWAP FULL
1654          *        SITUATIONS.
1655          */
1656         if (swap_pager_almost_full &&
1657             pass > 0 &&
1658             isep == 0 &&
1659             (vm_page_count_min(recycle_count) || avail_shortage > 0)) {
1660                 kprintf("Warning: system low on memory+swap "
1661                         "shortage %ld for %d ticks!\n",
1662                         avail_shortage, ticks - swap_fail_ticks);
1663                 if (bootverbose)
1664                 kprintf("Metrics: spaf=%d spf=%d pass=%d "
1665                         "avail=%ld target=%ld last=%u\n",
1666                         swap_pager_almost_full,
1667                         swap_pager_full,
1668                         pass,
1669                         avail_shortage,
1670                         vm_paging_target(),
1671                         (unsigned int)(ticks - lastkillticks));
1672         }
1673         if (swap_pager_full &&
1674             pass > 1 &&
1675             isep == 0 &&
1676             avail_shortage > 0 &&
1677             vm_paging_target() > 0 &&
1678             (unsigned int)(ticks - lastkillticks) >= hz) {
1679                 /*
1680                  * Kill something, maximum rate once per second to give
1681                  * the process time to free up sufficient memory.
1682                  */
1683                 lastkillticks = ticks;
1684                 info.bigproc = NULL;
1685                 info.bigsize = 0;
1686                 allproc_scan(vm_pageout_scan_callback, &info, 0);
1687                 if (info.bigproc != NULL) {
1688                         kprintf("Try to kill process %d %s\n",
1689                                 info.bigproc->p_pid, info.bigproc->p_comm);
1690                         info.bigproc->p_nice = PRIO_MIN;
1691                         info.bigproc->p_usched->resetpriority(
1692                                 FIRST_LWP_IN_PROC(info.bigproc));
1693                         atomic_set_int(&info.bigproc->p_flags, P_LOWMEMKILL);
1694                         killproc(info.bigproc, "out of swap space");
1695                         wakeup(&vmstats.v_free_count);
1696                         PRELE(info.bigproc);
1697                 }
1698         }
1699 }
1700
1701 static int
1702 vm_pageout_scan_callback(struct proc *p, void *data)
1703 {
1704         struct vm_pageout_scan_info *info = data;
1705         vm_offset_t size;
1706
1707         /*
1708          * Never kill system processes or init.  If we have configured swap
1709          * then try to avoid killing low-numbered pids.
1710          */
1711         if ((p->p_flags & P_SYSTEM) || (p->p_pid == 1) ||
1712             ((p->p_pid < 48) && (vm_swap_size != 0))) {
1713                 return (0);
1714         }
1715
1716         lwkt_gettoken(&p->p_token);
1717
1718         /*
1719          * if the process is in a non-running type state,
1720          * don't touch it.
1721          */
1722         if (p->p_stat != SACTIVE && p->p_stat != SSTOP && p->p_stat != SCORE) {
1723                 lwkt_reltoken(&p->p_token);
1724                 return (0);
1725         }
1726
1727         /*
1728          * Get the approximate process size.  Note that anonymous pages
1729          * with backing swap will be counted twice, but there should not
1730          * be too many such pages due to the stress the VM system is
1731          * under at this point.
1732          */
1733         size = vmspace_anonymous_count(p->p_vmspace) +
1734                 vmspace_swap_count(p->p_vmspace);
1735
1736         /*
1737          * If the this process is bigger than the biggest one
1738          * remember it.
1739          */
1740         if (info->bigsize < size) {
1741                 if (info->bigproc)
1742                         PRELE(info->bigproc);
1743                 PHOLD(p);
1744                 info->bigproc = p;
1745                 info->bigsize = size;
1746         }
1747         lwkt_reltoken(&p->p_token);
1748         lwkt_yield();
1749
1750         return(0);
1751 }
1752
1753 /*
1754  * This old guy slowly walks PQ_HOLD looking for pages which need to be
1755  * moved back to PQ_FREE.  It is possible for pages to accumulate here
1756  * when vm_page_free() races against vm_page_unhold(), resulting in a
1757  * page being left on a PQ_HOLD queue with hold_count == 0.
1758  *
1759  * It is easier to handle this edge condition here, in non-critical code,
1760  * rather than enforce a spin-lock for every 1->0 transition in
1761  * vm_page_unhold().
1762  *
1763  * NOTE: TAILQ_FOREACH becomes invalid the instant we unlock the queue.
1764  */
1765 static void
1766 vm_pageout_scan_hold(int q)
1767 {
1768         vm_page_t m;
1769
1770         vm_page_queues_spin_lock(PQ_HOLD + q);
1771         TAILQ_FOREACH(m, &vm_page_queues[PQ_HOLD + q].pl, pageq) {
1772                 if (m->flags & PG_MARKER)
1773                         continue;
1774
1775                 /*
1776                  * Process one page and return
1777                  */
1778                 if (m->hold_count)
1779                         break;
1780                 kprintf("DEBUG: pageout HOLD->FREE %p\n", m);
1781                 vm_page_hold(m);
1782                 vm_page_queues_spin_unlock(PQ_HOLD + q);
1783                 vm_page_unhold(m);      /* reprocess */
1784                 return;
1785         }
1786         vm_page_queues_spin_unlock(PQ_HOLD + q);
1787 }
1788
1789 /*
1790  * This routine tries to maintain the pseudo LRU active queue,
1791  * so that during long periods of time where there is no paging,
1792  * that some statistic accumulation still occurs.  This code
1793  * helps the situation where paging just starts to occur.
1794  */
1795 static void
1796 vm_pageout_page_stats(int q)
1797 {
1798         static int fullintervalcount = 0;
1799         struct vm_page marker;
1800         vm_page_t m;
1801         long pcount, tpcount;           /* Number of pages to check */
1802         long page_shortage;
1803
1804         page_shortage = (vmstats.v_inactive_target + vmstats.v_cache_max +
1805                          vmstats.v_free_min) -
1806                         (vmstats.v_free_count + vmstats.v_inactive_count +
1807                          vmstats.v_cache_count);
1808
1809         if (page_shortage <= 0)
1810                 return;
1811
1812         pcount = vm_page_queues[PQ_ACTIVE + q].lcnt;
1813         fullintervalcount += vm_pageout_stats_interval;
1814         if (fullintervalcount < vm_pageout_full_stats_interval) {
1815                 tpcount = (vm_pageout_stats_max * pcount) /
1816                           vmstats.v_page_count + 1;
1817                 if (pcount > tpcount)
1818                         pcount = tpcount;
1819         } else {
1820                 fullintervalcount = 0;
1821         }
1822
1823         bzero(&marker, sizeof(marker));
1824         marker.flags = PG_FICTITIOUS | PG_MARKER;
1825         marker.busy_count = PBUSY_LOCKED;
1826         marker.queue = PQ_ACTIVE + q;
1827         marker.pc = q;
1828         marker.wire_count = 1;
1829
1830         vm_page_queues_spin_lock(PQ_ACTIVE + q);
1831         TAILQ_INSERT_HEAD(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1832
1833         /*
1834          * Queue locked at top of loop to avoid stack marker issues.
1835          */
1836         while ((m = TAILQ_NEXT(&marker, pageq)) != NULL &&
1837                pcount-- > 0)
1838         {
1839                 int actcount;
1840
1841                 KKASSERT(m->queue == PQ_ACTIVE + q);
1842                 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1843                 TAILQ_INSERT_AFTER(&vm_page_queues[PQ_ACTIVE + q].pl, m,
1844                                    &marker, pageq);
1845
1846                 /*
1847                  * Skip marker pages (atomic against other markers to avoid
1848                  * infinite hop-over scans).
1849                  */
1850                 if (m->flags & PG_MARKER)
1851                         continue;
1852
1853                 /*
1854                  * Ignore pages we can't busy
1855                  */
1856                 if (vm_page_busy_try(m, TRUE))
1857                         continue;
1858
1859                 /*
1860                  * Remaining operations run with the page busy and neither
1861                  * the page or the queue will be spin-locked.
1862                  */
1863                 KKASSERT(m->queue == PQ_ACTIVE + q);
1864                 vm_page_queues_spin_unlock(PQ_ACTIVE + q);
1865
1866                 /*
1867                  * We can just remove wired pages from the queue
1868                  */
1869                 if (m->wire_count) {
1870                         vm_page_unqueue_nowakeup(m);
1871                         vm_page_wakeup(m);
1872                         goto next;
1873                 }
1874
1875
1876                 /*
1877                  * We now have a safely busied page, the page and queue
1878                  * spinlocks have been released.
1879                  *
1880                  * Ignore held and wired pages
1881                  */
1882                 if (m->hold_count || m->wire_count) {
1883                         vm_page_wakeup(m);
1884                         goto next;
1885                 }
1886
1887                 /*
1888                  * Calculate activity
1889                  */
1890                 actcount = 0;
1891                 if (m->flags & PG_REFERENCED) {
1892                         vm_page_flag_clear(m, PG_REFERENCED);
1893                         actcount += 1;
1894                 }
1895                 actcount += pmap_ts_referenced(m);
1896
1897                 /*
1898                  * Update act_count and move page to end of queue.
1899                  */
1900                 if (actcount) {
1901                         m->act_count += ACT_ADVANCE + actcount;
1902                         if (m->act_count > ACT_MAX)
1903                                 m->act_count = ACT_MAX;
1904                         vm_page_and_queue_spin_lock(m);
1905                         if (m->queue - m->pc == PQ_ACTIVE) {
1906                                 TAILQ_REMOVE(
1907                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1908                                         m, pageq);
1909                                 TAILQ_INSERT_TAIL(
1910                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1911                                         m, pageq);
1912                         }
1913                         vm_page_and_queue_spin_unlock(m);
1914                         vm_page_wakeup(m);
1915                         goto next;
1916                 }
1917
1918                 if (m->act_count == 0) {
1919                         /*
1920                          * We turn off page access, so that we have
1921                          * more accurate RSS stats.  We don't do this
1922                          * in the normal page deactivation when the
1923                          * system is loaded VM wise, because the
1924                          * cost of the large number of page protect
1925                          * operations would be higher than the value
1926                          * of doing the operation.
1927                          *
1928                          * We use the marker to save our place so
1929                          * we can release the spin lock.  both (m)
1930                          * and (next) will be invalid.
1931                          */
1932                         vm_page_protect(m, VM_PROT_NONE);
1933                         vm_page_deactivate(m);
1934                 } else {
1935                         m->act_count -= min(m->act_count, ACT_DECLINE);
1936                         vm_page_and_queue_spin_lock(m);
1937                         if (m->queue - m->pc == PQ_ACTIVE) {
1938                                 TAILQ_REMOVE(
1939                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1940                                         m, pageq);
1941                                 TAILQ_INSERT_TAIL(
1942                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1943                                         m, pageq);
1944                         }
1945                         vm_page_and_queue_spin_unlock(m);
1946                 }
1947                 vm_page_wakeup(m);
1948 next:
1949                 vm_page_queues_spin_lock(PQ_ACTIVE + q);
1950         }
1951
1952         /*
1953          * Remove our local marker
1954          *
1955          * Page queue still spin-locked.
1956          */
1957         TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1958         vm_page_queues_spin_unlock(PQ_ACTIVE + q);
1959 }
1960
1961 static int
1962 vm_pageout_free_page_calc(vm_size_t count)
1963 {
1964         if (count < vmstats.v_page_count)
1965                  return 0;
1966         /*
1967          * free_reserved needs to include enough for the largest swap pager
1968          * structures plus enough for any pv_entry structs when paging.
1969          *
1970          * v_free_min           normal allocations
1971          * v_free_reserved      system allocations
1972          * v_pageout_free_min   allocations by pageout daemon
1973          * v_interrupt_free_min low level allocations (e.g swap structures)
1974          */
1975         if (vmstats.v_page_count > 1024)
1976                 vmstats.v_free_min = 64 + (vmstats.v_page_count - 1024) / 200;
1977         else
1978                 vmstats.v_free_min = 64;
1979
1980         /*
1981          * Make sure the vmmeter slop can't blow out our global minimums.
1982          *
1983          * However, to accomodate weird configurations (vkernels with many
1984          * cpus and little memory, or artifically reduced hw.physmem), do
1985          * not allow v_free_min to exceed 1/20 of ram or the pageout demon
1986          * will go out of control.
1987          */
1988         if (vmstats.v_free_min < VMMETER_SLOP_COUNT * ncpus * 10)
1989                 vmstats.v_free_min = VMMETER_SLOP_COUNT * ncpus * 10;
1990         if (vmstats.v_free_min > vmstats.v_page_count / 20)
1991                 vmstats.v_free_min = vmstats.v_page_count / 20;
1992
1993         vmstats.v_free_reserved = vmstats.v_free_min * 4 / 8 + 7;
1994         vmstats.v_free_severe = vmstats.v_free_min * 4 / 8 + 0;
1995         vmstats.v_pageout_free_min = vmstats.v_free_min * 2 / 8 + 7;
1996         vmstats.v_interrupt_free_min = vmstats.v_free_min * 1 / 8 + 7;
1997
1998         return 1;
1999 }
2000
2001
2002 /*
2003  * vm_pageout is the high level pageout daemon.  TWO kernel threads run
2004  * this daemon, the primary pageout daemon and the emergency pageout daemon.
2005  *
2006  * The emergency pageout daemon takes over when the primary pageout daemon
2007  * deadlocks.  The emergency pageout daemon ONLY pages out to swap, thus
2008  * avoiding the many low-memory deadlocks which can occur when paging out
2009  * to VFS's.
2010  */
2011 static void
2012 vm_pageout_thread(void)
2013 {
2014         int pass;
2015         int q;
2016         int q1iterator = 0;
2017         int q2iterator = 0;
2018         int q3iterator = 0;
2019         int isep;
2020
2021         curthread->td_flags |= TDF_SYSTHREAD;
2022
2023         /*
2024          * We only need to setup once.
2025          */
2026         isep = 0;
2027         if (curthread == emergpager) {
2028                 isep = 1;
2029                 goto skip_setup;
2030         }
2031
2032         /*
2033          * Initialize some paging parameters.
2034          */
2035         vm_pageout_free_page_calc(vmstats.v_page_count);
2036
2037         /*
2038          * v_free_target and v_cache_min control pageout hysteresis.  Note
2039          * that these are more a measure of the VM cache queue hysteresis
2040          * then the VM free queue.  Specifically, v_free_target is the
2041          * high water mark (free+cache pages).
2042          *
2043          * v_free_reserved + v_cache_min (mostly means v_cache_min) is the
2044          * low water mark, while v_free_min is the stop.  v_cache_min must
2045          * be big enough to handle memory needs while the pageout daemon
2046          * is signalled and run to free more pages.
2047          */
2048         if (vmstats.v_free_count > 6144)
2049                 vmstats.v_free_target = 4 * vmstats.v_free_min +
2050                                         vmstats.v_free_reserved;
2051         else
2052                 vmstats.v_free_target = 2 * vmstats.v_free_min +
2053                                         vmstats.v_free_reserved;
2054
2055         /*
2056          * NOTE: With the new buffer cache b_act_count we want the default
2057          *       inactive target to be a percentage of available memory.
2058          *
2059          *       The inactive target essentially determines the minimum
2060          *       number of 'temporary' pages capable of caching one-time-use
2061          *       files when the VM system is otherwise full of pages
2062          *       belonging to multi-time-use files or active program data.
2063          *
2064          * NOTE: The inactive target is aggressively persued only if the
2065          *       inactive queue becomes too small.  If the inactive queue
2066          *       is large enough to satisfy page movement to free+cache
2067          *       then it is repopulated more slowly from the active queue.
2068          *       This allows a general inactive_target default to be set.
2069          *
2070          *       There is an issue here for processes which sit mostly idle
2071          *       'overnight', such as sshd, tcsh, and X.  Any movement from
2072          *       the active queue will eventually cause such pages to
2073          *       recycle eventually causing a lot of paging in the morning.
2074          *       To reduce the incidence of this pages cycled out of the
2075          *       buffer cache are moved directly to the inactive queue if
2076          *       they were only used once or twice.
2077          *
2078          *       The vfs.vm_cycle_point sysctl can be used to adjust this.
2079          *       Increasing the value (up to 64) increases the number of
2080          *       buffer recyclements which go directly to the inactive queue.
2081          */
2082         if (vmstats.v_free_count > 2048) {
2083                 vmstats.v_cache_min = vmstats.v_free_target;
2084                 vmstats.v_cache_max = 2 * vmstats.v_cache_min;
2085         } else {
2086                 vmstats.v_cache_min = 0;
2087                 vmstats.v_cache_max = 0;
2088         }
2089         vmstats.v_inactive_target = vmstats.v_free_count / 4;
2090
2091         /* XXX does not really belong here */
2092         if (vm_page_max_wired == 0)
2093                 vm_page_max_wired = vmstats.v_free_count / 3;
2094
2095         if (vm_pageout_stats_max == 0)
2096                 vm_pageout_stats_max = vmstats.v_free_target;
2097
2098         /*
2099          * Set interval in seconds for stats scan.
2100          */
2101         if (vm_pageout_stats_interval == 0)
2102                 vm_pageout_stats_interval = 5;
2103         if (vm_pageout_full_stats_interval == 0)
2104                 vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;
2105         
2106
2107         /*
2108          * Set maximum free per pass
2109          */
2110         if (vm_pageout_stats_free_max == 0)
2111                 vm_pageout_stats_free_max = 5;
2112
2113         swap_pager_swap_init();
2114         pass = 0;
2115
2116         atomic_swap_int(&sequence_emerg_pager, 1);
2117         wakeup(&sequence_emerg_pager);
2118
2119 skip_setup:
2120         /*
2121          * Sequence emergency pager startup
2122          */
2123         if (isep) {
2124                 while (sequence_emerg_pager == 0)
2125                         tsleep(&sequence_emerg_pager, 0, "pstartup", hz);
2126         }
2127
2128         /*
2129          * The pageout daemon is never done, so loop forever.
2130          *
2131          * WARNING!  This code is being executed by two kernel threads
2132          *           potentially simultaneously.
2133          */
2134         while (TRUE) {
2135                 int error;
2136                 long avail_shortage;
2137                 long inactive_shortage;
2138                 long vnodes_skipped = 0;
2139                 long recycle_count = 0;
2140                 long tmp;
2141
2142                 /*
2143                  * Wait for an action request.  If we timeout check to
2144                  * see if paging is needed (in case the normal wakeup
2145                  * code raced us).
2146                  */
2147                 if (isep) {
2148                         /*
2149                          * Emergency pagedaemon monitors the primary
2150                          * pagedaemon while vm_pages_needed != 0.
2151                          *
2152                          * The emergency pagedaemon only runs if VM paging
2153                          * is needed and the primary pagedaemon has not
2154                          * updated vm_pagedaemon_time for more than 2 seconds.
2155                          */
2156                         if (vm_pages_needed)
2157                                 tsleep(&vm_pagedaemon_time, 0, "psleep", hz);
2158                         else
2159                                 tsleep(&vm_pagedaemon_time, 0, "psleep", hz*10);
2160                         if (vm_pages_needed == 0) {
2161                                 pass = 0;
2162                                 continue;
2163                         }
2164                         if ((int)(ticks - vm_pagedaemon_time) < hz * 2) {
2165                                 pass = 0;
2166                                 continue;
2167                         }
2168                 } else {
2169                         /*
2170                          * Primary pagedaemon
2171                          *
2172                          * NOTE: We unconditionally cleanup PQ_HOLD even
2173                          *       when there is no work to do.
2174                          */
2175                         vm_pageout_scan_hold(q3iterator & PQ_L2_MASK);
2176                         ++q3iterator;
2177
2178                         if (vm_pages_needed == 0) {
2179                                 error = tsleep(&vm_pages_needed,
2180                                                0, "psleep",
2181                                                vm_pageout_stats_interval * hz);
2182                                 if (error &&
2183                                     vm_paging_needed() == 0 &&
2184                                     vm_pages_needed == 0) {
2185                                         for (q = 0; q < PQ_L2_SIZE; ++q)
2186                                                 vm_pageout_page_stats(q);
2187                                         continue;
2188                                 }
2189                                 vm_pagedaemon_time = ticks;
2190                                 vm_pages_needed = 1;
2191
2192                                 /*
2193                                  * Wake the emergency pagedaemon up so it
2194                                  * can monitor us.  It will automatically
2195                                  * go back into a long sleep when
2196                                  * vm_pages_needed returns to 0.
2197                                  */
2198                                 wakeup(&vm_pagedaemon_time);
2199                         }
2200                 }
2201
2202                 mycpu->gd_cnt.v_pdwakeups++;
2203
2204                 /*
2205                  * Scan for INACTIVE->CLEAN/PAGEOUT
2206                  *
2207                  * This routine tries to avoid thrashing the system with
2208                  * unnecessary activity.
2209                  *
2210                  * Calculate our target for the number of free+cache pages we
2211                  * want to get to.  This is higher then the number that causes
2212                  * allocations to stall (severe) in order to provide hysteresis,
2213                  * and if we don't make it all the way but get to the minimum
2214                  * we're happy.  Goose it a bit if there are multiple requests
2215                  * for memory.
2216                  *
2217                  * Don't reduce avail_shortage inside the loop or the
2218                  * PQAVERAGE() calculation will break.
2219                  *
2220                  * NOTE! deficit is differentiated from avail_shortage as
2221                  *       REQUIRING at least (deficit) pages to be cleaned,
2222                  *       even if the page queues are in good shape.  This
2223                  *       is used primarily for handling per-process
2224                  *       RLIMIT_RSS and may also see small values when
2225                  *       processes block due to low memory.
2226                  */
2227                 vmstats_rollup();
2228                 if (isep == 0)
2229                         vm_pagedaemon_time = ticks;
2230                 avail_shortage = vm_paging_target() + vm_pageout_deficit;
2231                 vm_pageout_deficit = 0;
2232
2233                 if (avail_shortage > 0) {
2234                         long delta = 0;
2235                         int qq;
2236
2237                         qq = q1iterator;
2238                         for (q = 0; q < PQ_L2_SIZE; ++q) {
2239                                 delta += vm_pageout_scan_inactive(
2240                                             pass,
2241                                             qq & PQ_L2_MASK,
2242                                             PQAVERAGE(avail_shortage),
2243                                             &vnodes_skipped);
2244                                 if (isep)
2245                                         --qq;
2246                                 else
2247                                         ++qq;
2248                                 if (avail_shortage - delta <= 0)
2249                                         break;
2250                         }
2251                         avail_shortage -= delta;
2252                         q1iterator = qq;
2253                 }
2254
2255                 /*
2256                  * Figure out how many active pages we must deactivate.  If
2257                  * we were able to reach our target with just the inactive
2258                  * scan above we limit the number of active pages we
2259                  * deactivate to reduce unnecessary work.
2260                  */
2261                 vmstats_rollup();
2262                 if (isep == 0)
2263                         vm_pagedaemon_time = ticks;
2264                 inactive_shortage = vmstats.v_inactive_target -
2265                                     vmstats.v_inactive_count;
2266
2267                 /*
2268                  * If we were unable to free sufficient inactive pages to
2269                  * satisfy the free/cache queue requirements then simply
2270                  * reaching the inactive target may not be good enough.
2271                  * Try to deactivate pages in excess of the target based
2272                  * on the shortfall.
2273                  *
2274                  * However to prevent thrashing the VM system do not
2275                  * deactivate more than an additional 1/10 the inactive
2276                  * target's worth of active pages.
2277                  */
2278                 if (avail_shortage > 0) {
2279                         tmp = avail_shortage * 2;
2280                         if (tmp > vmstats.v_inactive_target / 10)
2281                                 tmp = vmstats.v_inactive_target / 10;
2282                         inactive_shortage += tmp;
2283                 }
2284
2285                 /*
2286                  * Only trigger a pmap cleanup on inactive shortage.
2287                  */
2288                 if (isep == 0 && inactive_shortage > 0) {
2289                         pmap_collect();
2290                 }
2291
2292                 /*
2293                  * Scan for ACTIVE->INACTIVE
2294                  *
2295                  * Only trigger on inactive shortage.  Triggering on
2296                  * avail_shortage can starve the active queue with
2297                  * unnecessary active->inactive transitions and destroy
2298                  * performance.
2299                  *
2300                  * If this is the emergency pager, always try to move
2301                  * a few pages from active to inactive because the inactive
2302                  * queue might have enough pages, but not enough anonymous
2303                  * pages.
2304                  */
2305                 if (isep && inactive_shortage < vm_emerg_launder)
2306                         inactive_shortage = vm_emerg_launder;
2307
2308                 if (/*avail_shortage > 0 ||*/ inactive_shortage > 0) {
2309                         long delta = 0;
2310                         int qq;
2311
2312                         qq = q2iterator;
2313                         for (q = 0; q < PQ_L2_SIZE; ++q) {
2314                                 delta += vm_pageout_scan_active(
2315                                                 pass,
2316                                                 qq & PQ_L2_MASK,
2317                                                 PQAVERAGE(avail_shortage),
2318                                                 PQAVERAGE(inactive_shortage),
2319                                                 &recycle_count);
2320                                 if (isep)
2321                                         --qq;
2322                                 else
2323                                         ++qq;
2324                                 if (inactive_shortage - delta <= 0 &&
2325                                     avail_shortage - delta <= 0) {
2326                                         break;
2327                                 }
2328                         }
2329                         inactive_shortage -= delta;
2330                         avail_shortage -= delta;
2331                         q2iterator = qq;
2332                 }
2333
2334                 /*
2335                  * Scan for CACHE->FREE
2336                  *
2337                  * Finally free enough cache pages to meet our free page
2338                  * requirement and take more drastic measures if we are
2339                  * still in trouble.
2340                  */
2341                 vmstats_rollup();
2342                 if (isep == 0)
2343                         vm_pagedaemon_time = ticks;
2344                 vm_pageout_scan_cache(avail_shortage, pass,
2345                                       vnodes_skipped, recycle_count);
2346
2347                 /*
2348                  * Wait for more work.
2349                  */
2350                 if (avail_shortage > 0) {
2351                         ++pass;
2352                         if (pass < 10 && vm_pages_needed > 1) {
2353                                 /*
2354                                  * Normal operation, additional processes
2355                                  * have already kicked us.  Retry immediately
2356                                  * unless swap space is completely full in
2357                                  * which case delay a bit.
2358                                  */
2359                                 if (swap_pager_full) {
2360                                         tsleep(&vm_pages_needed, 0, "pdelay",
2361                                                 hz / 5);
2362                                 } /* else immediate retry */
2363                         } else if (pass < 10) {
2364                                 /*
2365                                  * Normal operation, fewer processes.  Delay
2366                                  * a bit but allow wakeups.  vm_pages_needed
2367                                  * is only adjusted against the primary
2368                                  * pagedaemon here.
2369                                  */
2370                                 if (isep == 0)
2371                                         vm_pages_needed = 0;
2372                                 tsleep(&vm_pages_needed, 0, "pdelay", hz / 10);
2373                                 if (isep == 0)
2374                                         vm_pages_needed = 1;
2375                         } else if (swap_pager_full == 0) {
2376                                 /*
2377                                  * We've taken too many passes, forced delay.
2378                                  */
2379                                 tsleep(&vm_pages_needed, 0, "pdelay", hz / 10);
2380                         } else {
2381                                 /*
2382                                  * Running out of memory, catastrophic
2383                                  * back-off to one-second intervals.
2384                                  */
2385                                 tsleep(&vm_pages_needed, 0, "pdelay", hz);
2386                         }
2387                 } else if (vm_pages_needed) {
2388                         /*
2389                          * Interlocked wakeup of waiters (non-optional).
2390                          *
2391                          * Similar to vm_page_free_wakeup() in vm_page.c,
2392                          * wake
2393                          */
2394                         pass = 0;
2395                         if (!vm_page_count_min(vm_page_free_hysteresis) ||
2396                             !vm_page_count_target()) {
2397                                 vm_pages_needed = 0;
2398                                 wakeup(&vmstats.v_free_count);
2399                         }
2400                 } else {
2401                         pass = 0;
2402                 }
2403         }
2404 }
2405
2406 static struct kproc_desc pg1_kp = {
2407         "pagedaemon",
2408         vm_pageout_thread,
2409         &pagethread
2410 };
2411 SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &pg1_kp);
2412
2413 static struct kproc_desc pg2_kp = {
2414         "emergpager",
2415         vm_pageout_thread,
2416         &emergpager
2417 };
2418 SYSINIT(emergpager, SI_SUB_KTHREAD_PAGE, SI_ORDER_ANY, kproc_start, &pg2_kp);
2419
2420
2421 /*
2422  * Called after allocating a page out of the cache or free queue
2423  * to possibly wake the pagedaemon up to replentish our supply.
2424  *
2425  * We try to generate some hysteresis by waking the pagedaemon up
2426  * when our free+cache pages go below the free_min+cache_min level.
2427  * The pagedaemon tries to get the count back up to at least the
2428  * minimum, and through to the target level if possible.
2429  *
2430  * If the pagedaemon is already active bump vm_pages_needed as a hint
2431  * that there are even more requests pending.
2432  *
2433  * SMP races ok?
2434  * No requirements.
2435  */
2436 void
2437 pagedaemon_wakeup(void)
2438 {
2439         if (vm_paging_needed() && curthread != pagethread) {
2440                 if (vm_pages_needed == 0) {
2441                         vm_pages_needed = 1;    /* SMP race ok */
2442                         wakeup(&vm_pages_needed);
2443                 } else if (vm_page_count_min(0)) {
2444                         ++vm_pages_needed;      /* SMP race ok */
2445                 }
2446         }
2447 }
2448
2449 #if !defined(NO_SWAPPING)
2450
2451 /*
2452  * SMP races ok?
2453  * No requirements.
2454  */
2455 static void
2456 vm_req_vmdaemon(void)
2457 {
2458         static int lastrun = 0;
2459
2460         if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
2461                 wakeup(&vm_daemon_needed);
2462                 lastrun = ticks;
2463         }
2464 }
2465
2466 static int vm_daemon_callback(struct proc *p, void *data __unused);
2467
2468 /*
2469  * No requirements.
2470  */
2471 static void
2472 vm_daemon(void)
2473 {
2474         int req_swapout;
2475
2476         while (TRUE) {
2477                 tsleep(&vm_daemon_needed, 0, "psleep", 0);
2478                 req_swapout = atomic_swap_int(&vm_pageout_req_swapout, 0);
2479
2480                 /*
2481                  * forced swapouts
2482                  */
2483                 if (req_swapout)
2484                         swapout_procs(vm_pageout_req_swapout);
2485
2486                 /*
2487                  * scan the processes for exceeding their rlimits or if
2488                  * process is swapped out -- deactivate pages
2489                  */
2490                 allproc_scan(vm_daemon_callback, NULL, 0);
2491         }
2492 }
2493
2494 static int
2495 vm_daemon_callback(struct proc *p, void *data __unused)
2496 {
2497         struct vmspace *vm;
2498         vm_pindex_t limit, size;
2499
2500         /*
2501          * if this is a system process or if we have already
2502          * looked at this process, skip it.
2503          */
2504         lwkt_gettoken(&p->p_token);
2505
2506         if (p->p_flags & (P_SYSTEM | P_WEXIT)) {
2507                 lwkt_reltoken(&p->p_token);
2508                 return (0);
2509         }
2510
2511         /*
2512          * if the process is in a non-running type state,
2513          * don't touch it.
2514          */
2515         if (p->p_stat != SACTIVE && p->p_stat != SSTOP && p->p_stat != SCORE) {
2516                 lwkt_reltoken(&p->p_token);
2517                 return (0);
2518         }
2519
2520         /*
2521          * get a limit
2522          */
2523         limit = OFF_TO_IDX(qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
2524                                 p->p_rlimit[RLIMIT_RSS].rlim_max));
2525
2526         /*
2527          * let processes that are swapped out really be
2528          * swapped out.  Set the limit to nothing to get as
2529          * many pages out to swap as possible.
2530          */
2531         if (p->p_flags & P_SWAPPEDOUT)
2532                 limit = 0;
2533
2534         vm = p->p_vmspace;
2535         vmspace_hold(vm);
2536         size = pmap_resident_tlnw_count(&vm->vm_pmap);
2537         if (limit >= 0 && size > 4096 &&
2538             size - 4096 >= limit && vm_pageout_memuse_mode >= 1) {
2539                 vm_pageout_map_deactivate_pages(&vm->vm_map, limit);
2540         }
2541         vmspace_drop(vm);
2542
2543         lwkt_reltoken(&p->p_token);
2544
2545         return (0);
2546 }
2547
2548 #endif