1 /*
2  * Copyright (c) 1991 Regents of the University of California.
3  * All rights reserved.
4  * Copyright (c) 1994 John S. Dyson
5  * All rights reserved.
6  * Copyright (c) 1994 David Greenman
7  * All rights reserved.
8  *
9  * This code is derived from software contributed to Berkeley by
10  * The Mach Operating System project at Carnegie-Mellon University.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *      from: @(#)vm_pageout.c  7.4 (Berkeley) 5/7/91
37  *
38  *
39  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40  * All rights reserved.
41  *
42  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
43  *
44  * Permission to use, copy, modify and distribute this software and
45  * its documentation is hereby granted, provided that both the copyright
46  * notice and this permission notice appear in all copies of the
47  * software, derivative works or modified versions, and any portions
48  * thereof, and that both notices appear in supporting documentation.
49  *
50  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53  *
54  * Carnegie Mellon requests users of this software to return to
55  *
56  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
57  *  School of Computer Science
58  *  Carnegie Mellon University
59  *  Pittsburgh PA 15213-3890
60  *
61  * any improvements or extensions that they make and grant Carnegie the
62  * rights to redistribute these changes.
63  *
64  * $FreeBSD: src/sys/vm/vm_pageout.c,v 1.151.2.15 2002/12/29 18:21:04 dillon Exp $
65  */
66
67 /*
68  *      The proverbial page-out daemon.
69  */
70
71 #include "opt_vm.h"
72 #include <sys/param.h>
73 #include <sys/systm.h>
74 #include <sys/kernel.h>
75 #include <sys/proc.h>
76 #include <sys/kthread.h>
77 #include <sys/resourcevar.h>
78 #include <sys/signalvar.h>
79 #include <sys/vnode.h>
80 #include <sys/vmmeter.h>
81 #include <sys/sysctl.h>
82
83 #include <vm/vm.h>
84 #include <vm/vm_param.h>
85 #include <sys/lock.h>
86 #include <vm/vm_object.h>
87 #include <vm/vm_page.h>
88 #include <vm/vm_map.h>
89 #include <vm/vm_pageout.h>
90 #include <vm/vm_pager.h>
91 #include <vm/swap_pager.h>
92 #include <vm/vm_extern.h>
93
94 #include <sys/thread2.h>
95 #include <sys/spinlock2.h>
96 #include <vm/vm_page2.h>
97
98 /*
99  * System initialization
100  */
101
102 /* the kernel process "vm_pageout"*/
103 static int vm_pageout_page(vm_page_t m, int *max_launderp,
104                            int *vnodes_skippedp, struct vnode **vpfailedp,
105                            int pass, int vmflush_flags);
106 static int vm_pageout_clean_helper (vm_page_t, int);
107 static int vm_pageout_free_page_calc (vm_size_t count);
108 static void vm_pageout_page_free(vm_page_t m);
109 struct thread *pagethread;
110
111 #if !defined(NO_SWAPPING)
112 /* the kernel process "vm_daemon"*/
113 static void vm_daemon (void);
114 static struct   thread *vmthread;
115
116 static struct kproc_desc vm_kp = {
117         "vmdaemon",
118         vm_daemon,
119         &vmthread
120 };
121 SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp);
122 #endif
123
124 int vm_pages_needed = 0;        /* Event on which pageout daemon sleeps */
125 int vm_pageout_deficit = 0;     /* Estimated number of pages deficit */
126 int vm_pageout_pages_needed = 0;/* pageout daemon needs pages */
127 int vm_page_free_hysteresis = 16;
128
129 #if !defined(NO_SWAPPING)
130 static int vm_pageout_req_swapout;
131 static int vm_daemon_needed;
132 #endif
133 static int vm_max_launder = 4096;
134 static int vm_pageout_stats_max=0, vm_pageout_stats_interval = 0;
135 static int vm_pageout_full_stats_interval = 0;
136 static int vm_pageout_stats_free_max=0, vm_pageout_algorithm=0;
137 static int defer_swap_pageouts=0;
138 static int disable_swap_pageouts=0;
139 static u_int vm_anonmem_decline = ACT_DECLINE;
140 static u_int vm_filemem_decline = ACT_DECLINE * 2;
141
142 #if defined(NO_SWAPPING)
143 static int vm_swap_enabled=0;
144 static int vm_swap_idle_enabled=0;
145 #else
146 static int vm_swap_enabled=1;
147 static int vm_swap_idle_enabled=0;
148 #endif
149 int vm_pageout_memuse_mode=1;   /* 0-disable, 1-passive, 2-active swp*/
150
151 SYSCTL_UINT(_vm, VM_PAGEOUT_ALGORITHM, anonmem_decline,
152         CTLFLAG_RW, &vm_anonmem_decline, 0, "active->inactive anon memory");
153
154 SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, filemem_decline,
155         CTLFLAG_RW, &vm_filemem_decline, 0, "active->inactive file cache");
156
157 SYSCTL_INT(_vm, OID_AUTO, page_free_hysteresis,
158         CTLFLAG_RW, &vm_page_free_hysteresis, 0,
159         "Free more pages than the minimum required");
160
161 SYSCTL_INT(_vm, OID_AUTO, max_launder,
162         CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");
163
164 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
165         CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");
166
167 SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
168         CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");
169
170 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
171         CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");
172
173 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
174         CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "Not implemented");
175 SYSCTL_INT(_vm, OID_AUTO, pageout_memuse_mode,
176         CTLFLAG_RW, &vm_pageout_memuse_mode, 0, "memoryuse resource mode");
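/*
 * Usage sketch (illustrative, not part of the original source): the knob
 * above can be tuned at runtime, e.g. "sysctl vm.pageout_memuse_mode=2"
 * to actively page out processes exceeding RLIMIT_RSS, or "=0" to disable
 * RSS enforcement entirely (see the 0/1/2 meanings noted at the variable
 * declaration).
 */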
177
178 #if defined(NO_SWAPPING)
179 SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
180         CTLFLAG_RD, &vm_swap_enabled, 0, "");
181 SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
182         CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
183 #else
184 SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
185         CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
186 SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
187         CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
188 #endif
189
190 SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
191         CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");
192
193 SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
194         CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");
195
196 static int pageout_lock_miss;
197 SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
198         CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");
199
200 int vm_page_max_wired;          /* XXX max # of wired pages system-wide */
201
202 #if !defined(NO_SWAPPING)
203 static void vm_req_vmdaemon (void);
204 #endif
205 static void vm_pageout_page_stats(int q);
206
207 /*
208  * Calculate approximately how many pages on each queue to try to
209  * clean.  An exact calculation creates an edge condition when the
210  * queues are unbalanced so add significant slop.  The queue scans
211  * will stop early when targets are reached and will start where they
212  * left off on the next pass.
213  *
214  * We need to be generous here because there are all sorts of loading
215  * conditions that can cause edge cases if we try to average over all queues.
216  * In particular, storage subsystems have become so fast that paging
217  * activity can become quite frantic.  Eventually we will probably need
218  * two paging threads, one for dirty pages and one for clean, to deal
219  * with the bandwidth requirements.
220  *
221  * So what we do is calculate a value that can be satisfied nominally by
222  * only having to scan half the queues.
223  */
224 static __inline int
225 PQAVERAGE(int n)
226 {
227         int avg;
228
229         if (n >= 0) {
230                 avg = ((n + (PQ_L2_SIZE - 1)) / (PQ_L2_SIZE / 2) + 1);
231         } else {
232                 avg = ((n - (PQ_L2_SIZE - 1)) / (PQ_L2_SIZE / 2) - 1);
233         }
234         return avg;
235 }
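/*
 * Illustrative example (PQ_L2_SIZE is configuration-dependent; 256 is
 * assumed here purely for the arithmetic): with a shortage of n = 1000,
 * PQAVERAGE() returns ((1000 + 255) / 128) + 1 = 10, so satisfying the
 * per-queue target on only half of the 256 queues (128 * 10 = 1280)
 * already covers the shortage with slop to spare.
 */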
236
237 /*
238  * vm_pageout_clean_helper:
239  *
240  * Clean the page and remove it from the laundry.  The page must be
241  * busied by the caller and is disposed of by this function.
242  *
243  * We set the busy bit to cause potential page faults on this page to
244  * block.  Note the careful timing, however: the busy bit isn't set until
245  * late and we cannot do anything that will mess with the page.
246  */
247 static int
248 vm_pageout_clean_helper(vm_page_t m, int vmflush_flags)
249 {
250         vm_object_t object;
251         vm_page_t mc[BLIST_MAX_ALLOC];
252         int error;
253         int ib, is, page_base;
254         vm_pindex_t pindex = m->pindex;
255
256         object = m->object;
257
258         /*
259          * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
260          * with the new swapper, but we could have serious problems paging
261          * out other object types if there is insufficient memory.  
262          *
263          * Unfortunately, checking free memory here is far too late, so the
264          * check has been moved up a procedural level.
265          */
266
267         /*
268          * Don't mess with the page if it's busy, held, or special
269          *
270          * XXX do we really need to check hold_count here?  hold_count
271          * isn't supposed to mess with vm_page ops except prevent the
272          * page from being reused.
273          */
274         if (m->hold_count != 0 || (m->flags & PG_UNMANAGED)) {
275                 vm_page_wakeup(m);
276                 return 0;
277         }
278
279         /*
280          * Place page in cluster.  Align cluster for optimal swap space
281          * allocation (whether it is swap or not).  This is typically ~16-32
282          * pages, which also tends to align the cluster to multiples of the
283          * filesystem block size if backed by a filesystem.
284          */
285         page_base = pindex % BLIST_MAX_ALLOC;
286         mc[page_base] = m;
287         ib = page_base - 1;
288         is = page_base + 1;
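
        /*
         * Illustrative example (a BLIST_MAX_ALLOC of 16 is assumed only
         * for the arithmetic): a page at pindex 37 yields page_base = 5,
         * so it is stored in mc[5] and the cluster may grow downward to
         * object index 32 and upward toward index 47, i.e. the cluster
         * stays naturally aligned to a BLIST_MAX_ALLOC-page boundary.
         */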
289
290         /*
291          * Scan object for clusterable pages.
292          *
293          * We can cluster ONLY if: ->> the page is NOT
294          * clean, wired, busy, held, or mapped into a
295          * buffer, and one of the following:
296          * 1) The page is inactive, or a seldom used
297          *    active page.
298          * -or-
299          * 2) we force the issue.
300          *
301          * During heavy mmap/modification loads the pageout
302          * daemon can really fragment the underlying file
303          * due to flushing pages out of order and not trying to
304          * align the clusters (which leaves sporadic out-of-order
305          * holes).  To solve this problem we do the reverse scan
306          * first and attempt to align our cluster, then do a
307          * forward scan if room remains.
308          */
309         vm_object_hold(object);
310
311         while (ib >= 0) {
312                 vm_page_t p;
313
314                 p = vm_page_lookup_busy_try(object, pindex - page_base + ib,
315                                             TRUE, &error);
316                 if (error || p == NULL)
317                         break;
318                 if ((p->queue - p->pc) == PQ_CACHE ||
319                     (p->flags & PG_UNMANAGED)) {
320                         vm_page_wakeup(p);
321                         break;
322                 }
323                 vm_page_test_dirty(p);
324                 if (((p->dirty & p->valid) == 0 &&
325                      (p->flags & PG_NEED_COMMIT) == 0) ||
326                     p->wire_count != 0 ||       /* may be held by buf cache */
327                     p->hold_count != 0) {       /* may be undergoing I/O */
328                         vm_page_wakeup(p);
329                         break;
330                 }
331                 if (p->queue - p->pc != PQ_INACTIVE) {
332                         if (p->queue - p->pc != PQ_ACTIVE ||
333                             (vmflush_flags & VM_PAGER_ALLOW_ACTIVE) == 0) {
334                                 vm_page_wakeup(p);
335                                 break;
336                         }
337                 }
338
339                 /*
340                  * Try to maintain page groupings in the cluster.
341                  */
342                 if (m->flags & PG_WINATCFLS)
343                         vm_page_flag_set(p, PG_WINATCFLS);
344                 else
345                         vm_page_flag_clear(p, PG_WINATCFLS);
346                 p->act_count = m->act_count;
347
348                 mc[ib] = p;
349                 --ib;
350         }
351         ++ib;   /* fixup */
352
353         while (is < BLIST_MAX_ALLOC &&
354                pindex - page_base + is < object->size) {
355                 vm_page_t p;
356
357                 p = vm_page_lookup_busy_try(object, pindex - page_base + is,
358                                             TRUE, &error);
359                 if (error || p == NULL)
360                         break;
361                 if (((p->queue - p->pc) == PQ_CACHE) ||
362                     (p->flags & PG_UNMANAGED)) {
363                         vm_page_wakeup(p);
364                         break;
365                 }
366                 vm_page_test_dirty(p);
367                 if (((p->dirty & p->valid) == 0 &&
368                      (p->flags & PG_NEED_COMMIT) == 0) ||
369                     p->wire_count != 0 ||       /* may be held by buf cache */
370                     p->hold_count != 0) {       /* may be undergoing I/O */
371                         vm_page_wakeup(p);
372                         break;
373                 }
374                 if (p->queue - p->pc != PQ_INACTIVE) {
375                         if (p->queue - p->pc != PQ_ACTIVE ||
376                             (vmflush_flags & VM_PAGER_ALLOW_ACTIVE) == 0) {
377                                 vm_page_wakeup(p);
378                                 break;
379                         }
380                 }
381
382                 /*
383                  * Try to maintain page groupings in the cluster.
384                  */
385                 if (m->flags & PG_WINATCFLS)
386                         vm_page_flag_set(p, PG_WINATCFLS);
387                 else
388                         vm_page_flag_clear(p, PG_WINATCFLS);
389                 p->act_count = m->act_count;
390
391                 mc[is] = p;
392                 ++is;
393         }
394
395         vm_object_drop(object);
396
397         /*
398          * we allow reads during pageouts...
399          */
400         return vm_pageout_flush(&mc[ib], is - ib, vmflush_flags);
401 }
402
403 /*
404  * vm_pageout_flush() - launder the given pages
405  *
406  *      The given pages are laundered.  Note that we set up for the start of
407  *      I/O (i.e. busy the page), mark it read-only, and bump the object
408  *      reference count all in here rather than in the parent.  If we want
409  *      the parent to do more sophisticated things we may have to change
410  *      the ordering.
411  *
412  *      The pages in the array must be busied by the caller and will be
413  *      unbusied by this function.
414  */
415 int
416 vm_pageout_flush(vm_page_t *mc, int count, int vmflush_flags)
417 {
418         vm_object_t object;
419         int pageout_status[count];
420         int numpagedout = 0;
421         int i;
422
423         /*
424          * Initiate I/O.  Bump the vm_page_t->busy counter.
425          */
426         for (i = 0; i < count; i++) {
427                 KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
428                         ("vm_pageout_flush page %p index %d/%d: partially "
429                          "invalid page", mc[i], i, count));
430                 vm_page_io_start(mc[i]);
431         }
432
433         /*
434          * We must make the pages read-only.  This will also force the
435          * modified bit in the related pmaps to be cleared.  The pager
436          * cannot clear the bit for us since the I/O completion code
437          * typically runs from an interrupt.  The act of making the page
438          * read-only handles the case for us.
439          *
440          * Then we can unbusy the pages; we still hold a reference by virtue
441          * of our soft-busy.
442          */
443         for (i = 0; i < count; i++) {
444                 if (vmflush_flags & VM_PAGER_TRY_TO_CACHE)
445                         vm_page_protect(mc[i], VM_PROT_NONE);
446                 else
447                         vm_page_protect(mc[i], VM_PROT_READ);
448                 vm_page_wakeup(mc[i]);
449         }
450
451         object = mc[0]->object;
452         vm_object_pip_add(object, count);
453
454         vm_pager_put_pages(object, mc, count,
455             (vmflush_flags |
456              ((object == &kernel_object) ? VM_PAGER_PUT_SYNC : 0)),
457             pageout_status);
458
459         for (i = 0; i < count; i++) {
460                 vm_page_t mt = mc[i];
461
462                 switch (pageout_status[i]) {
463                 case VM_PAGER_OK:
464                         numpagedout++;
465                         break;
466                 case VM_PAGER_PEND:
467                         numpagedout++;
468                         break;
469                 case VM_PAGER_BAD:
470                         /*
471                          * Page outside of range of object. Right now we
472                          * essentially lose the changes by pretending it
473                          * worked.
474                          */
475                         vm_page_busy_wait(mt, FALSE, "pgbad");
476                         pmap_clear_modify(mt);
477                         vm_page_undirty(mt);
478                         vm_page_wakeup(mt);
479                         break;
480                 case VM_PAGER_ERROR:
481                 case VM_PAGER_FAIL:
482                         /*
483                          * A page typically cannot be paged out when we
484                          * have run out of swap.  We leave the page
485                          * marked inactive and will try to page it out
486                          * again later.
487                          *
488                          * Starvation of the active page list is used to
489                          * determine when the system is massively memory
490                          * starved.
491                          */
492                         break;
493                 case VM_PAGER_AGAIN:
494                         break;
495                 }
496
497                 /*
498                  * If not PENDing this was a synchronous operation and we
499                  * clean up after the I/O.  If it is PENDing the mess is
500                  * cleaned up asynchronously.
501                  *
502                  * Also nominally act on the caller's wishes if the caller
503                  * wants to try to really clean (cache or free) the page.
504                  *
505                  * Also nominally deactivate the page if the system is
506                  * memory-stressed.
507                  */
508                 if (pageout_status[i] != VM_PAGER_PEND) {
509                         vm_page_busy_wait(mt, FALSE, "pgouw");
510                         vm_page_io_finish(mt);
511                         if (vmflush_flags & VM_PAGER_TRY_TO_CACHE) {
512                                 vm_page_try_to_cache(mt);
513                         } else if (vm_page_count_severe()) {
514                                 vm_page_deactivate(mt);
515                                 vm_page_wakeup(mt);
516                         } else {
517                                 vm_page_wakeup(mt);
518                         }
519                         vm_object_pip_wakeup(object);
520                 }
521         }
522         return numpagedout;
523 }
524
525 #if !defined(NO_SWAPPING)
526
527 /*
528  * Callback function, page busied for us.  We must dispose of the busy
529  * condition.  Any related pmap pages may be held but will not be locked.
530  */
531 static
532 int
533 vm_pageout_mdp_callback(struct pmap_pgscan_info *info, vm_offset_t va,
534                         vm_page_t p)
535 {
536         int actcount;
537         int cleanit = 0;
538
539         /*
540          * Basic tests - There should never be a marker, and we can stop
541          *               once the RSS is below the required level.
542          */
543         KKASSERT((p->flags & PG_MARKER) == 0);
544         if (pmap_resident_tlnw_count(info->pmap) <= info->limit) {
545                 vm_page_wakeup(p);
546                 return(-1);
547         }
548
549         mycpu->gd_cnt.v_pdpages++;
550
551         if (p->wire_count || p->hold_count || (p->flags & PG_UNMANAGED)) {
552                 vm_page_wakeup(p);
553                 goto done;
554         }
555
556         ++info->actioncount;
557
558         /*
559          * Check if the page has been referenced recently.  If it has,
560          * activate it and skip.
561          */
562         actcount = pmap_ts_referenced(p);
563         if (actcount) {
564                 vm_page_flag_set(p, PG_REFERENCED);
565         } else if (p->flags & PG_REFERENCED) {
566                 actcount = 1;
567         }
568
569         if (actcount) {
570                 if (p->queue - p->pc != PQ_ACTIVE) {
571                         vm_page_and_queue_spin_lock(p);
572                         if (p->queue - p->pc != PQ_ACTIVE) {
573                                 vm_page_and_queue_spin_unlock(p);
574                                 vm_page_activate(p);
575                         } else {
576                                 vm_page_and_queue_spin_unlock(p);
577                         }
578                 } else {
579                         p->act_count += actcount;
580                         if (p->act_count > ACT_MAX)
581                                 p->act_count = ACT_MAX;
582                 }
583                 vm_page_flag_clear(p, PG_REFERENCED);
584                 vm_page_wakeup(p);
585                 goto done;
586         }
587
588         /*
589          * Remove the page from this particular pmap.  Once we do this, our
590          * pmap scans will not see it again (unless it gets faulted in), so
591          * we must actively dispose of or deal with the page.
592          */
593         pmap_remove_specific(info->pmap, p);
594
595         /*
596          * If the page is not mapped to another process (i.e. as would be
597          * typical if this were a shared page from a library) then deactivate
598          * the page and clean it in two passes only.
599          *
600          * If the page hasn't been referenced since the last check, remove it
601          * from the pmap.  If it is no longer mapped, deactivate it
602          * immediately, accelerating the normal decline.
603          *
604          * Once the page has been removed from the pmap the RSS code no
605          * longer tracks it so we have to make sure that it is staged for
606          * potential flush action.
607          */
608         if ((p->flags & PG_MAPPED) == 0) {
609                 if (p->queue - p->pc == PQ_ACTIVE) {
610                         vm_page_deactivate(p);
611                 }
612                 if (p->queue - p->pc == PQ_INACTIVE) {
613                         cleanit = 1;
614                 }
615         }
616
617         /*
618          * Ok, try to fully clean the page and any nearby pages such that at
619          * least the requested page is freed or moved to the cache queue.
620          *
621          * We usually do this synchronously to allow us to get the page into
622          * the CACHE queue quickly, which will prevent memory exhaustion if
623          * a process with a memoryuse limit is running away.  However, the
624          * sysadmin may desire to set vm.swap_user_async which relaxes this
625          * and improves write performance.
626          */
627         if (cleanit) {
628                 int max_launder = 0x7FFF;
629                 int vnodes_skipped = 0;
630                 int vmflush_flags;
631                 struct vnode *vpfailed = NULL;
632
633                 info->offset = va;
634
635                 if (vm_pageout_memuse_mode >= 2) {
636                         vmflush_flags = VM_PAGER_TRY_TO_CACHE |
637                                         VM_PAGER_ALLOW_ACTIVE;
638                         if (swap_user_async == 0)
639                                 vmflush_flags |= VM_PAGER_PUT_SYNC;
640                         vm_page_flag_set(p, PG_WINATCFLS);
641                         info->cleancount +=
642                                 vm_pageout_page(p, &max_launder,
643                                                 &vnodes_skipped,
644                                                 &vpfailed, 1, vmflush_flags);
645                 } else {
646                         vm_page_wakeup(p);
647                         ++info->cleancount;
648                 }
649         } else {
650                 vm_page_wakeup(p);
651         }
652
653         /*
654          * Must be at end to avoid SMP races.
655          */
656 done:
657         lwkt_user_yield();
658         return 0;
659 }
660
661 /*
662  * Deactivate some number of pages in a map due to set RLIMIT_RSS limits,
663  * which is relatively difficult to do.  We try to keep track of where we
664  * left off last time to reduce scan overhead.
665  *
666  * Called when vm_pageout_memuse_mode is >= 1.
667  */
668 void
669 vm_pageout_map_deactivate_pages(vm_map_t map, vm_pindex_t limit)
670 {
671         vm_offset_t pgout_offset;
672         struct pmap_pgscan_info info;
673         int retries = 3;
674
675         pgout_offset = map->pgout_offset;
676 again:
677 #if 0
678         kprintf("%016jx ", pgout_offset);
679 #endif
680         if (pgout_offset < VM_MIN_USER_ADDRESS)
681                 pgout_offset = VM_MIN_USER_ADDRESS;
682         if (pgout_offset >= VM_MAX_USER_ADDRESS)
683                 pgout_offset = 0;
684         info.pmap = vm_map_pmap(map);
685         info.limit = limit;
686         info.beg_addr = pgout_offset;
687         info.end_addr = VM_MAX_USER_ADDRESS;
688         info.callback = vm_pageout_mdp_callback;
689         info.cleancount = 0;
690         info.actioncount = 0;
691         info.busycount = 0;
692
693         pmap_pgscan(&info);
694         pgout_offset = info.offset;
695 #if 0
696         kprintf("%016jx %08lx %08lx\n", pgout_offset,
697                 info.cleancount, info.actioncount);
698 #endif
699
700         if (pgout_offset != VM_MAX_USER_ADDRESS &&
701             pmap_resident_tlnw_count(vm_map_pmap(map)) > limit) {
702                 goto again;
703         } else if (retries &&
704                    pmap_resident_tlnw_count(vm_map_pmap(map)) > limit) {
705                 --retries;
706                 goto again;
707         }
708         map->pgout_offset = pgout_offset;
709 }
710 #endif
711
712 /*
713  * Called when the pageout scan wants to free a page.  We no longer
714  * try to cycle the vm_object here with a reference & dealloc, which can
715  * cause a non-trivial object collapse in a critical path.
716  *
717  * It is unclear why we cycled the ref_count in the past, perhaps to try
718  * to optimize shadow chain collapses but I don't quite see why it would
719  * be necessary.  An OBJ_DEAD object should terminate any and all vm_pages
720  * synchronously and not have to be kick-started.
721  */
722 static void
723 vm_pageout_page_free(vm_page_t m) 
724 {
725         vm_page_protect(m, VM_PROT_NONE);
726         vm_page_free(m);
727 }
728
729 /*
730  * vm_pageout_scan does the dirty work for the pageout daemon.
731  */
732 struct vm_pageout_scan_info {
733         struct proc *bigproc;
734         vm_offset_t bigsize;
735 };
736
737 static int vm_pageout_scan_callback(struct proc *p, void *data);
738
739 static int
740 vm_pageout_scan_inactive(int pass, int q, int avail_shortage,
741                          int *vnodes_skipped)
742 {
743         vm_page_t m;
744         struct vm_page marker;
745         struct vnode *vpfailed;         /* warning, allowed to be stale */
746         int maxscan;
747         int delta = 0;
748         int max_launder;
749
750         /*
751          * Start scanning the inactive queue for pages we can move to the
752          * cache or free.  The scan will stop when the target is reached or
753          * we have scanned the entire inactive queue.  Note that m->act_count
754          * is not used to form decisions for the inactive queue, only for the
755          * active queue.
756          *
757          * max_launder limits the number of dirty pages we flush per scan.
758          * For most systems a smaller value (16 or 32) is more robust under
759          * extreme memory and disk pressure because any unnecessary writes
760          * to disk can result in extreme performance degradation.  However,
761          * systems with excessive dirty pages (especially when MAP_NOSYNC is
762          * used) will die horribly with limited laundering.  If the pageout
763          * daemon cannot clean enough pages in the first pass, we let it go
764          * all out in succeeding passes.
765          */
766         if ((max_launder = vm_max_launder) <= 1)
767                 max_launder = 1;
768         if (pass)
769                 max_launder = 10000;
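
        /*
         * Example (hypothetical tuning): with vm.max_launder left at its
         * default of 4096 the first pass may flush up to 4096 dirty pages
         * per scan; an administrator setting it to 32 would throttle the
         * first pass to 32 flushes, while later passes still go all out
         * under the 10000 limit applied above.
         */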
770
771         /*
772          * Initialize our marker
773          */
774         bzero(&marker, sizeof(marker));
775         marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
776         marker.queue = PQ_INACTIVE + q;
777         marker.pc = q;
778         marker.wire_count = 1;
779
780         /*
781          * Inactive queue scan.
782          *
783          * NOTE: The vm_page must be spinlocked before the queue to avoid
784          *       deadlocks, so it is easiest to simply iterate the loop
785          *       with the queue unlocked at the top.
786          */
787         vpfailed = NULL;
788
789         vm_page_queues_spin_lock(PQ_INACTIVE + q);
790         TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE + q].pl, &marker, pageq);
791         maxscan = vm_page_queues[PQ_INACTIVE + q].lcnt;
792
793         /*
794          * Queue locked at top of loop to avoid stack marker issues.
795          */
796         while ((m = TAILQ_NEXT(&marker, pageq)) != NULL &&
797                maxscan-- > 0 && avail_shortage - delta > 0)
798         {
799                 int count;
800
801                 KKASSERT(m->queue == PQ_INACTIVE + q);
802                 TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE + q].pl,
803                              &marker, pageq);
804                 TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE + q].pl, m,
805                                    &marker, pageq);
806                 mycpu->gd_cnt.v_pdpages++;
807
808                 /*
809                  * Skip marker pages (atomic against other markers to avoid
810                  * infinite hop-over scans).
811                  */
812                 if (m->flags & PG_MARKER)
813                         continue;
814
815                 /*
816                  * Try to busy the page.  Don't mess with pages which are
817                  * already busy or reorder them in the queue.
818                  */
819                 if (vm_page_busy_try(m, TRUE))
820                         continue;
821
822                 /*
823                  * Remaining operations run with the page busy and neither
824                  * the page or the queue will be spin-locked.
825                  */
826                 vm_page_queues_spin_unlock(PQ_INACTIVE + q);
827                 KKASSERT(m->queue == PQ_INACTIVE + q);
828
829                 count = vm_pageout_page(m, &max_launder, vnodes_skipped,
830                                         &vpfailed, pass, 0);
831                 delta += count;
832
833                 /*
834                  * Systems with a ton of memory can wind up with huge
835                  * deactivation counts.  Because the inactive scan is
836                  * doing a lot of flushing, the combination can result
837                  * in excessive paging even in situations where other
838                  * unrelated threads free up sufficient VM.
839                  *
840                  * To deal with this we abort the nominal active->inactive
841                  * scan before we hit the inactive target when free+cache
842                  * levels have reached a reasonable target.
843                  *
844                  * When deciding to stop early we need to add some slop to
845                  * the test and we need to return full completion to the caller
846                  * to prevent the caller from thinking there is something
847                  * wrong and issuing a low-memory+swap warning or pkill.
848                  *
849                  * A deficit forces paging regardless of the state of the
850                  * VM page queues (used for RSS enforcement).
851                  */
852                 lwkt_yield();
853                 vm_page_queues_spin_lock(PQ_INACTIVE + q);
854                 if (vm_paging_target() < -vm_max_launder) {
855                         /*
856                          * Stopping early, return full completion to caller.
857                          */
858                         if (delta < avail_shortage)
859                                 delta = avail_shortage;
860                         break;
861                 }
862         }
863
864         /* page queue still spin-locked */
865         TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE + q].pl, &marker, pageq);
866         vm_page_queues_spin_unlock(PQ_INACTIVE + q);
867
868         return (delta);
869 }
870
871 /*
872  * Pageout the specified page, return the total number of pages paged out
873  * (this routine may cluster).
874  *
875  * The page must be busied and soft-busied by the caller and will be disposed
876  * of by this function.
877  */
878 static int
879 vm_pageout_page(vm_page_t m, int *max_launderp, int *vnodes_skippedp,
880                 struct vnode **vpfailedp, int pass, int vmflush_flags)
881 {
882         vm_object_t object;
883         int actcount;
884         int count = 0;
885
886         /*
887          * It is possible for a page to be busied ad-hoc (e.g. the
888          * pmap_collect() code) and wired and race against the
889          * allocation of a new page.  vm_page_alloc() may be forced
890          * to deactivate the wired page in which case it winds up
891          * on the inactive queue and must be handled here.  We
892          * correct the problem simply by unqueuing the page.
893          */
894         if (m->wire_count) {
895                 vm_page_unqueue_nowakeup(m);
896                 vm_page_wakeup(m);
897                 kprintf("WARNING: pagedaemon: wired page on "
898                         "inactive queue %p\n", m);
899                 return 0;
900         }
901
902         /*
903          * A held page may be undergoing I/O, so skip it.
904          */
905         if (m->hold_count) {
906                 vm_page_and_queue_spin_lock(m);
907                 if (m->queue - m->pc == PQ_INACTIVE) {
908                         TAILQ_REMOVE(
909                                 &vm_page_queues[m->queue].pl, m, pageq);
910                         TAILQ_INSERT_TAIL(
911                                 &vm_page_queues[m->queue].pl, m, pageq);
912                         ++vm_swapcache_inactive_heuristic;
913                 }
914                 vm_page_and_queue_spin_unlock(m);
915                 vm_page_wakeup(m);
916                 return 0;
917         }
918
919         if (m->object == NULL || m->object->ref_count == 0) {
920                 /*
921                  * If the object is not being used, we ignore previous
922                  * references.
923                  */
924                 vm_page_flag_clear(m, PG_REFERENCED);
925                 pmap_clear_reference(m);
926                 /* fall through to end */
927         } else if (((m->flags & PG_REFERENCED) == 0) &&
928                     (actcount = pmap_ts_referenced(m))) {
929                 /*
930                  * Otherwise, if the page has been referenced while
931                  * in the inactive queue, we bump the "activation
932                  * count" upwards, making it less likely that the
933                  * page will be added back to the inactive queue
934                  * prematurely again.  Here we check the page tables
935                  * (or emulated bits, if any), given the upper level
936                  * VM system not knowing anything about existing
937                  * references.
938                  */
939                 vm_page_activate(m);
940                 m->act_count += (actcount + ACT_ADVANCE);
941                 vm_page_wakeup(m);
942                 return 0;
943         }
944
945         /*
946          * (m) is still busied.
947          *
948          * If the upper level VM system knows about any page
949          * references, we activate the page.  We also set the
950          * "activation count" higher than normal so that we will be less
951          * likely to place pages back onto the inactive queue again.
952          */
953         if ((m->flags & PG_REFERENCED) != 0) {
954                 vm_page_flag_clear(m, PG_REFERENCED);
955                 actcount = pmap_ts_referenced(m);
956                 vm_page_activate(m);
957                 m->act_count += (actcount + ACT_ADVANCE + 1);
958                 vm_page_wakeup(m);
959                 return 0;
960         }
961
962         /*
963          * If the upper level VM system doesn't know anything about
964          * the page being dirty, we have to check for it again.  As
965          * far as the VM code knows, any partially dirty pages are
966          * fully dirty.
967          *
968          * Pages marked PG_WRITEABLE may be mapped into the user
969          * address space of a process running on another cpu.  A
970          * user process (without holding the MP lock) running on
971          * another cpu may be able to touch the page while we are
972          * trying to remove it.  vm_page_cache() will handle this
973          * case for us.
974          */
975         if (m->dirty == 0) {
976                 vm_page_test_dirty(m);
977         } else {
978                 vm_page_dirty(m);
979         }
980
981         if (m->valid == 0 && (m->flags & PG_NEED_COMMIT) == 0) {
982                 /*
983                  * Invalid pages can be easily freed
984                  */
985                 vm_pageout_page_free(m);
986                 mycpu->gd_cnt.v_dfree++;
987                 ++count;
988         } else if (m->dirty == 0 && (m->flags & PG_NEED_COMMIT) == 0) {
989                 /*
990                  * Clean pages can be placed onto the cache queue.
991                  * This effectively frees them.
992                  */
993                 vm_page_cache(m);
994                 ++count;
995         } else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) {
996                 /*
997                  * Dirty pages need to be paged out, but flushing
998                  * a page is extremely expensive versus freeing
999                  * a clean page.  Rather than artificially limiting
1000                  * the number of pages we can flush, we instead give
1001                  * dirty pages extra priority on the inactive queue
1002                  * by forcing them to be cycled through the queue
1003                  * twice before being flushed, after which the
1004                  * (now clean) page will cycle through once more
1005                  * before being freed.  This significantly extends
1006                  * the thrash point for a heavily loaded machine.
1007                  */
1008                 vm_page_flag_set(m, PG_WINATCFLS);
1009                 vm_page_and_queue_spin_lock(m);
1010                 if (m->queue - m->pc == PQ_INACTIVE) {
1011                         TAILQ_REMOVE(
1012                                 &vm_page_queues[m->queue].pl, m, pageq);
1013                         TAILQ_INSERT_TAIL(
1014                                 &vm_page_queues[m->queue].pl, m, pageq);
1015                         ++vm_swapcache_inactive_heuristic;
1016                 }
1017                 vm_page_and_queue_spin_unlock(m);
1018                 vm_page_wakeup(m);
1019         } else if (*max_launderp > 0) {
1020                 /*
1021                  * We always want to try to flush some dirty pages if
1022                  * we encounter them, to keep the system stable.
1023                  * Normally this number is small, but under extreme
1024                  * pressure where there are insufficient clean pages
1025                  * on the inactive queue, we may have to go all out.
1026                  */
1027                 int swap_pageouts_ok;
1028                 struct vnode *vp = NULL;
1029
1030                 swap_pageouts_ok = 0;
1031                 object = m->object;
1032                 if (object &&
1033                     (object->type != OBJT_SWAP) &&
1034                     (object->type != OBJT_DEFAULT)) {
1035                         swap_pageouts_ok = 1;
1036                 } else {
1037                         swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
1038                         swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
1039                         vm_page_count_min(0));
1040                 }
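
                /*
                 * Summary of the effect above (illustrative reading):
                 * vnode-backed pages can always be laundered; swap-backed
                 * pages can be laundered unless one of the sysctls is set,
                 * with defer_swapspace_pageouts still allowing it once the
                 * free page count hits the minimum, and
                 * disable_swapspace_pageouts forbidding it outright.
                 */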
1041
1042                 /*
1043                  * We don't bother paging objects that are "dead".
1044                  * Those objects are in a "rundown" state.
1045                  */
1046                 if (!swap_pageouts_ok ||
1047                     (object == NULL) ||
1048                     (object->flags & OBJ_DEAD)) {
1049                         vm_page_and_queue_spin_lock(m);
1050                         if (m->queue - m->pc == PQ_INACTIVE) {
1051                                 TAILQ_REMOVE(
1052                                     &vm_page_queues[m->queue].pl,
1053                                     m, pageq);
1054                                 TAILQ_INSERT_TAIL(
1055                                     &vm_page_queues[m->queue].pl,
1056                                     m, pageq);
1057                                 ++vm_swapcache_inactive_heuristic;
1058                         }
1059                         vm_page_and_queue_spin_unlock(m);
1060                         vm_page_wakeup(m);
1061                         return 0;
1062                 }
1063
1064                 /*
1065                  * (m) is still busied.
1066                  *
1067                  * The object is already known NOT to be dead.   It
1068                  * is possible for the vget() to block the whole
1069                  * pageout daemon, but the new low-memory handling
1070                  * code should prevent it.
1071                  *
1072                  * The previous code skipped locked vnodes and, worse,
1073                  * reordered pages in the queue.  This results in
1074                  * completely non-deterministic operation because,
1075                  * quite often, a vm_fault has initiated an I/O and
1076                  * is holding a locked vnode at just the point where
1077                  * the pageout daemon is woken up.
1078                  *
1079                  * We can't wait forever for the vnode lock, we might
1080                  * deadlock due to a vn_read() getting stuck in
1081                  * vm_wait while holding this vnode.  We skip the
1082                  * vnode if we can't get it in a reasonable amount
1083                  * of time.
1084                  *
1085                  * vpfailed is used to (try to) avoid the case where
1086                  * a large number of pages are associated with a
1087                  * locked vnode, which could cause the pageout daemon
1088                  * to stall for an excessive amount of time.
1089                  */
1090                 if (object->type == OBJT_VNODE) {
1091                         int flags;
1092
1093                         vp = object->handle;
1094                         flags = LK_EXCLUSIVE;
1095                         if (vp == *vpfailedp)
1096                                 flags |= LK_NOWAIT;
1097                         else
1098                                 flags |= LK_TIMELOCK;
1099                         vm_page_hold(m);
1100                         vm_page_wakeup(m);
1101
1102                         /*
1103                          * We have unbusied (m) temporarily so we can
1104                          * acquire the vp lock without deadlocking.
1105                          * (m) is held to prevent destruction.
1106                          */
1107                         if (vget(vp, flags) != 0) {
1108                                 *vpfailedp = vp;
1109                                 ++pageout_lock_miss;
1110                                 if (object->flags & OBJ_MIGHTBEDIRTY)
1111                                             ++*vnodes_skippedp;
1112                                 vm_page_unhold(m);
1113                                 return 0;
1114                         }
1115
1116                         /*
1117                          * The page might have been moved to another
1118                          * queue during potential blocking in vget()
1119                          * above.  The page might have been freed and
1120                          * reused for another vnode.  The object might
1121                          * have been reused for another vnode.
1122                          */
1123                         if (m->queue - m->pc != PQ_INACTIVE ||
1124                             m->object != object ||
1125                             object->handle != vp) {
1126                                 if (object->flags & OBJ_MIGHTBEDIRTY)
1127                                         ++*vnodes_skippedp;
1128                                 vput(vp);
1129                                 vm_page_unhold(m);
1130                                 return 0;
1131                         }
1132
1133                         /*
1134                          * The page may have been busied while we were
1135                          * blocked in vget() above.  We don't move the
1136                          * page back onto the end of the queue, which
1137                          * keeps the statistics more accurate.
1138                          */
1139                         if (vm_page_busy_try(m, TRUE)) {
1140                                 vput(vp);
1141                                 vm_page_unhold(m);
1142                                 return 0;
1143                         }
1144                         vm_page_unhold(m);
1145
1146                         /*
1147                          * (m) is busied again
1148                          *
1149                          * We own the busy bit and remove our hold
1150                          * bit.  If the page is still held it
1151                          * might be undergoing I/O, so skip it.
1152                          */
1153                         if (m->hold_count) {
1154                                 vm_page_and_queue_spin_lock(m);
1155                                 if (m->queue - m->pc == PQ_INACTIVE) {
1156                                         TAILQ_REMOVE(&vm_page_queues[m->queue].pl, m, pageq);
1157                                         TAILQ_INSERT_TAIL(&vm_page_queues[m->queue].pl, m, pageq);
1158                                         ++vm_swapcache_inactive_heuristic;
1159                                 }
1160                                 vm_page_and_queue_spin_unlock(m);
1161                                 if (object->flags & OBJ_MIGHTBEDIRTY)
1162                                         ++*vnodes_skippedp;
1163                                 vm_page_wakeup(m);
1164                                 vput(vp);
1165                                 return 0;
1166                         }
1167                         /* (m) is left busied as we fall through */
1168                 }
1169
1170                 /*
1171                  * page is busy and not held here.
1172                  *
1173                  * If a page is dirty, then it is either being washed
1174                  * (but not yet cleaned) or it is still in the
1175                  * laundry.  If it is still in the laundry, then we
1176                  * start the cleaning operation.
1177                  *
1178                  * decrement inactive_shortage on success to account
1179                  * for the (future) cleaned page.  Otherwise we
1180                  * could wind up laundering or cleaning too many
1181                  * pages.
1182                  *
1183                  * NOTE: Cleaning the page here does not cause
1184                  *       force_deficit to be adjusted, because the
1185                  *       page is not being freed or moved to the
1186                  *       cache.
1187                  */
1188                 count = vm_pageout_clean_helper(m, vmflush_flags);
1189                 *max_launderp -= count;
1190
1191                 /*
1192                  * Clean ate busy, page no longer accessible
1193                  */
1194                 if (vp != NULL)
1195                         vput(vp);
1196         } else {
1197                 vm_page_wakeup(m);
1198         }
1199         return count;
1200 }
1201
1202 static int
1203 vm_pageout_scan_active(int pass, int q,
1204                        int avail_shortage, int inactive_shortage,
1205                        int *recycle_countp)
1206 {
1207         struct vm_page marker;
1208         vm_page_t m;
1209         int actcount;
1210         int delta = 0;
1211         int maxscan;
1212
1213         /*
1214          * We want to move pages from the active queue to the inactive
1215          * queue to get the inactive queue to the inactive target.  If
1216          * we still have a page shortage from above we try to directly free
1217          * clean pages instead of moving them.
1218          *
1219          * If we do still have a shortage we keep track of the number of
1220          * pages we free or cache (recycle_count) as a measure of thrashing
1221          * between the active and inactive queues.
1222          *
1223          * If we were able to completely satisfy the free+cache targets
1224          * from the inactive pool we limit the number of pages we move
1225          * from the active pool to the inactive pool to 2x the pages we
1226          * had removed from the inactive pool (with a minimum of 1/5 the
1227          * inactive target).  If we were not able to completely satisfy
1228          * the free+cache targets we go for the whole target aggressively.
1229          *
1230          * NOTE: Both variables can end up negative.
1231          * NOTE: We are still in a critical section.
1232          */
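
        /*
         * Worked example (numbers illustrative only): if the inactive scan
         * met the free+cache targets after removing, say, 500 pages, this
         * scan would move at most ~1000 pages (but never less than 1/5 of
         * the inactive target) from the active queue; if the targets were
         * not met it goes after the full inactive shortage aggressively.
         */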
1233
1234         bzero(&marker, sizeof(marker));
1235         marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
1236         marker.queue = PQ_ACTIVE + q;
1237         marker.pc = q;
1238         marker.wire_count = 1;
1239
1240         vm_page_queues_spin_lock(PQ_ACTIVE + q);
1241         TAILQ_INSERT_HEAD(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1242         maxscan = vm_page_queues[PQ_ACTIVE + q].lcnt;
1243
1244         /*
1245          * Queue locked at top of loop to avoid stack marker issues.
1246          */
1247         while ((m = TAILQ_NEXT(&marker, pageq)) != NULL &&
1248                maxscan-- > 0 && (avail_shortage - delta > 0 ||
1249                                 inactive_shortage > 0))
1250         {
1251                 KKASSERT(m->queue == PQ_ACTIVE + q);
1252                 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl,
1253                              &marker, pageq);
1254                 TAILQ_INSERT_AFTER(&vm_page_queues[PQ_ACTIVE + q].pl, m,
1255                                    &marker, pageq);
1256
1257                 /*
1258                  * Skip marker pages (atomic against other markers to avoid
1259                  * infinite hop-over scans).
1260                  */
1261                 if (m->flags & PG_MARKER)
1262                         continue;
1263
1264                 /*
1265                  * Try to busy the page.  Don't mess with pages which are
1266                  * already busy or reorder them in the queue.
1267                  */
1268                 if (vm_page_busy_try(m, TRUE))
1269                         continue;
1270
1271                 /*
1272                  * Remaining operations run with the page busy and neither
1273                  * the page or the queue will be spin-locked.
1274                  */
1275                 vm_page_queues_spin_unlock(PQ_ACTIVE + q);
1276                 KKASSERT(m->queue == PQ_ACTIVE + q);
1277
1278                 /*
1279                  * Don't deactivate pages that are held, even if we can
1280                  * busy them.  (XXX why not?)
1281                  */
1282                 if (m->hold_count != 0) {
1283                         vm_page_and_queue_spin_lock(m);
1284                         if (m->queue - m->pc == PQ_ACTIVE) {
1285                                 TAILQ_REMOVE(
1286                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1287                                         m, pageq);
1288                                 TAILQ_INSERT_TAIL(
1289                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1290                                         m, pageq);
1291                         }
1292                         vm_page_and_queue_spin_unlock(m);
1293                         vm_page_wakeup(m);
1294                         goto next;
1295                 }
1296
1297                 /*
1298                  * The count for pagedaemon pages is done after checking the
1299                  * page for eligibility...
1300                  */
1301                 mycpu->gd_cnt.v_pdpages++;
1302
1303                 /*
1304                  * Check to see "how much" the page has been used and clear
1305                  * the tracking access bits.  If the object has no references
1306                  * don't bother paying the expense.
1307                  */
1308                 actcount = 0;
1309                 if (m->object && m->object->ref_count != 0) {
1310                         if (m->flags & PG_REFERENCED)
1311                                 ++actcount;
1312                         actcount += pmap_ts_referenced(m);
1313                         if (actcount) {
1314                                 m->act_count += ACT_ADVANCE + actcount;
1315                                 if (m->act_count > ACT_MAX)
1316                                         m->act_count = ACT_MAX;
1317                         }
1318                 }
1319                 vm_page_flag_clear(m, PG_REFERENCED);
1320
1321                 /*
1322                  * actcount is only valid if the object ref_count is non-zero.
1323                  * If the page does not have an object, actcount will be zero.
1324                  */
1325                 if (actcount && m->object->ref_count != 0) {
1326                         vm_page_and_queue_spin_lock(m);
1327                         if (m->queue - m->pc == PQ_ACTIVE) {
1328                                 TAILQ_REMOVE(
1329                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1330                                         m, pageq);
1331                                 TAILQ_INSERT_TAIL(
1332                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1333                                         m, pageq);
1334                         }
1335                         vm_page_and_queue_spin_unlock(m);
1336                         vm_page_wakeup(m);
1337                 } else {
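                             /*
                              * Not recently referenced (or the object has no
                              * references).  Anonymous memory (default and
                              * swap objects) declines act_count at a separately
                              * tunable rate (vm_anonmem_decline) from
                              * file-backed memory (vm_filemem_decline).
                              */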
1338                         switch(m->object->type) {
1339                         case OBJT_DEFAULT:
1340                         case OBJT_SWAP:
1341                                 m->act_count -= min(m->act_count,
1342                                                     vm_anonmem_decline);
1343                                 break;
1344                         default:
1345                                 m->act_count -= min(m->act_count,
1346                                                     vm_filemem_decline);
1347                                 break;
1348                         }
1349                         if (vm_pageout_algorithm ||
1350                             (m->object == NULL) ||
1351                             (m->object && (m->object->ref_count == 0)) ||
1352                             m->act_count < pass + 1
1353                         ) {
1354                                 /*
1355                                  * Deactivate the page.  If we had a
1356                                  * shortage from our inactive scan, try to
1357                                  * free (cache) the page instead.
1358                                  *
1359                                  * Don't just blindly cache the page if
1360                                  * we do not have a shortage from the
1361                                  * inactive scan, as that could lead to
1362                                  * gigabytes being moved.
1363                                  */
1364                                 --inactive_shortage;
1365                                 if (avail_shortage - delta > 0 ||
1366                                     (m->object && (m->object->ref_count == 0)))
1367                                 {
1368                                         if (avail_shortage - delta > 0)
1369                                                 ++*recycle_countp;
1370                                         vm_page_protect(m, VM_PROT_NONE);
1371                                         if (m->dirty == 0 &&
1372                                             (m->flags & PG_NEED_COMMIT) == 0 &&
1373                                             avail_shortage - delta > 0) {
1374                                                 vm_page_cache(m);
1375                                         } else {
1376                                                 vm_page_deactivate(m);
1377                                                 vm_page_wakeup(m);
1378                                         }
1379                                 } else {
1380                                         vm_page_deactivate(m);
1381                                         vm_page_wakeup(m);
1382                                 }
1383                                 ++delta;
1384                         } else {
1385                                 vm_page_and_queue_spin_lock(m);
1386                                 if (m->queue - m->pc == PQ_ACTIVE) {
1387                                         TAILQ_REMOVE(
1388                                             &vm_page_queues[PQ_ACTIVE + q].pl,
1389                                             m, pageq);
1390                                         TAILQ_INSERT_TAIL(
1391                                             &vm_page_queues[PQ_ACTIVE + q].pl,
1392                                             m, pageq);
1393                                 }
1394                                 vm_page_and_queue_spin_unlock(m);
1395                                 vm_page_wakeup(m);
1396                         }
1397                 }
1398 next:
1399                 lwkt_yield();
1400                 vm_page_queues_spin_lock(PQ_ACTIVE + q);
1401         }
1402
1403         /*
1404          * Clean out our local marker.
1405          *
1406          * Page queue still spin-locked.
1407          */
1408         TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1409         vm_page_queues_spin_unlock(PQ_ACTIVE + q);
1410
1411         return (delta);
1412 }
1413
1414 /*
1415  * The number of actually free pages can drop down to v_free_reserved;
1416  * we try to build the free count back above v_free_min.  Note that
1417  * vm_paging_needed() also returns TRUE if v_free_count is not at
1418  * least v_free_min so that is the minimum we must build the free
1419  * count to.
1420  *
1421  * We use a slightly higher target to improve hysteresis,
1422  * ((v_free_target + v_free_min) / 2).  Since v_free_target
1423  * is usually the same as v_cache_min, this maintains about
1424  * half as many pages in the free queue as in the cache queue,
1425  * providing pretty good pipelining for pageout operation.
1426  *
1427  * The system operator can manipulate vm.v_cache_min and
1428  * vm.v_free_target to tune the pageout daemon.  Be sure
1429  * to keep vm.v_free_min < vm.v_free_target.
1430  *
1431  * Note that the original paging target is to get at least
1432  * (free_min + cache_min) into (free + cache).  The slightly
1433  * higher target will shift additional pages from cache to free
1434  * without affecting the original paging target in order to
1435  * maintain better hysteresis and not have the free count always
1436  * be dead-on v_free_min.
1437  *
1438  * NOTE: we are still in a critical section.
1439  *
1440  * Pages moved from PQ_CACHE to totally free are not counted in the
1441  * pages_freed counter.
1442  */
1443 static void
1444 vm_pageout_scan_cache(int avail_shortage, int pass,
1445                       int vnodes_skipped, int recycle_count)
1446 {
1447         static int lastkillticks;
1448         struct vm_pageout_scan_info info;
1449         vm_page_t m;
1450
1451         while (vmstats.v_free_count <
1452                (vmstats.v_free_min + vmstats.v_free_target) / 2) {
1453                 /*
1454                  * This steals some code from vm/vm_page.c
1455                  */
1456                 static int cache_rover = 0;
1457
1458                 m = vm_page_list_find(PQ_CACHE,
1459                                       cache_rover & PQ_L2_MASK, FALSE);
1460                 if (m == NULL)
1461                         break;
1462                 /* page is returned removed from its queue and spinlocked */
1463                 if (vm_page_busy_try(m, TRUE)) {
1464                         vm_page_deactivate_locked(m);
1465                         vm_page_spin_unlock(m);
1466                         continue;
1467                 }
1468                 vm_page_spin_unlock(m);
1469                 pagedaemon_wakeup();
1470                 lwkt_yield();
1471
1472                 /*
1473                  * Remaining operations run with the page busy and neither
1474                  * the page nor the queue will be spin-locked.
1475                  */
1476                 if ((m->flags & (PG_UNMANAGED | PG_NEED_COMMIT)) ||
1477                     m->hold_count ||
1478                     m->wire_count) {
1479                         vm_page_deactivate(m);
1480                         vm_page_wakeup(m);
1481                         continue;
1482                 }
1483                 KKASSERT((m->flags & PG_MAPPED) == 0);
1484                 KKASSERT(m->dirty == 0);
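                     /*
                      * Advance the rover by PQ_PRIME2 so successive scans
                      * start in different PQ_CACHE subqueues rather than
                      * hammering the same one.
                      */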
1485                 cache_rover += PQ_PRIME2;
1486                 vm_pageout_page_free(m);
1487                 mycpu->gd_cnt.v_dfree++;
1488         }
1489
1490 #if !defined(NO_SWAPPING)
1491         /*
1492          * Idle process swapout -- run once per second.
1493          */
1494         if (vm_swap_idle_enabled) {
1495                 static time_t lsec;
1496                 if (time_uptime != lsec) {
1497                         atomic_set_int(&vm_pageout_req_swapout, VM_SWAP_IDLE);
1498                         vm_req_vmdaemon();
1499                         lsec = time_uptime;
1500                 }
1501         }
1502 #endif
1503                 
1504         /*
1505          * If we didn't get enough free pages, and we have skipped a vnode
1506          * in a writeable object, wakeup the sync daemon.  And kick swapout
1507          * if we did not get enough free pages.
1508          */
1509         if (vm_paging_target() > 0) {
1510                 if (vnodes_skipped && vm_page_count_min(0))
1511                         speedup_syncer(NULL);
1512 #if !defined(NO_SWAPPING)
1513                 if (vm_swap_enabled && vm_page_count_target()) {
1514                         atomic_set_int(&vm_pageout_req_swapout, VM_SWAP_NORMAL);
1515                         vm_req_vmdaemon();
1516                 }
1517 #endif
1518         }
1519
1520         /*
1521          * Handle catastrophic conditions.  Under good conditions we should
1522          * be at the target, well beyond our minimum.  If we could not even
1523          * reach our minimum the system is under heavy stress.  But just being
1524          * under heavy stress does not trigger process killing.
1525          *
1526          * We consider ourselves to have run out of memory if the swap pager
1527          * is full and avail_shortage is still positive.  The secondary check
1528          * ensures that we do not kill processes if the instantaneous
1529          * availability is good, even if the pageout daemon pass says it
1530          * couldn't get to the target.
1531          */
1532         if (swap_pager_almost_full &&
1533             pass > 0 &&
1534             (vm_page_count_min(recycle_count) || avail_shortage > 0)) {
1535                 kprintf("Warning: system low on memory+swap "
1536                         "shortage %d for %d ticks!\n",
1537                         avail_shortage, ticks - swap_fail_ticks);
1538         }
1539         if (swap_pager_full &&
1540             pass > 1 &&
1541             avail_shortage > 0 &&
1542             vm_paging_target() > 0 &&
1543             (unsigned int)(ticks - lastkillticks) >= hz) {
1544                 /*
1545                  * Kill something, maximum rate once per second to give
1546                  * the process time to free up sufficient memory.
1547                  */
1548                 lastkillticks = ticks;
1549                 info.bigproc = NULL;
1550                 info.bigsize = 0;
1551                 allproc_scan(vm_pageout_scan_callback, &info);
1552                 if (info.bigproc != NULL) {
1553                         info.bigproc->p_nice = PRIO_MIN;
1554                         info.bigproc->p_usched->resetpriority(
1555                                 FIRST_LWP_IN_PROC(info.bigproc));
1556                         atomic_set_int(&info.bigproc->p_flags, P_LOWMEMKILL);
1557                         killproc(info.bigproc, "out of swap space");
1558                         wakeup(&vmstats.v_free_count);
1559                         PRELE(info.bigproc);
1560                 }
1561         }
1562 }
1563
1564 static int
1565 vm_pageout_scan_callback(struct proc *p, void *data)
1566 {
1567         struct vm_pageout_scan_info *info = data;
1568         vm_offset_t size;
1569
1570         /*
1571          * Never kill system processes or init.  If we have configured swap
1572          * then try to avoid killing low-numbered pids.
1573          */
1574         if ((p->p_flags & P_SYSTEM) || (p->p_pid == 1) ||
1575             ((p->p_pid < 48) && (vm_swap_size != 0))) {
1576                 return (0);
1577         }
1578
1579         lwkt_gettoken(&p->p_token);
1580
1581         /*
1582          * if the process is in a non-running type state,
1583          * don't touch it.
1584          */
1585         if (p->p_stat != SACTIVE && p->p_stat != SSTOP && p->p_stat != SCORE) {
1586                 lwkt_reltoken(&p->p_token);
1587                 return (0);
1588         }
1589
1590         /*
1591          * Get the approximate process size.  Note that anonymous pages
1592          * with backing swap will be counted twice, but there should not
1593          * be too many such pages due to the stress the VM system is
1594          * under at this point.
1595          */
1596         size = vmspace_anonymous_count(p->p_vmspace) +
1597                 vmspace_swap_count(p->p_vmspace);
1598
1599         /*
1600          * If this process is bigger than the biggest one found so far,
1601          * remember it.
1602          */
1603         if (info->bigsize < size) {
1604                 if (info->bigproc)
1605                         PRELE(info->bigproc);
1606                 PHOLD(p);
1607                 info->bigproc = p;
1608                 info->bigsize = size;
1609         }
1610         lwkt_reltoken(&p->p_token);
1611         lwkt_yield();
1612
1613         return(0);
1614 }
1615
1616 /*
1617  * This routine tries to maintain the pseudo-LRU active queue so that
1618  * some statistics accumulation still occurs during long periods when
1619  * there is no paging.  This helps the situation when paging first
1620  * starts to occur.
1621  */
1622 static void
1623 vm_pageout_page_stats(int q)
1624 {
1625         static int fullintervalcount = 0;
1626         struct vm_page marker;
1627         vm_page_t m;
1628         int pcount, tpcount;            /* Number of pages to check */
1629         int page_shortage;
1630
1631         page_shortage = (vmstats.v_inactive_target + vmstats.v_cache_max +
1632                          vmstats.v_free_min) -
1633                         (vmstats.v_free_count + vmstats.v_inactive_count +
1634                          vmstats.v_cache_count);
1635
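             /*
              * If free + inactive + cache already covers the combined
              * inactive/cache/free thresholds there is nothing to do.
              */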
1636         if (page_shortage <= 0)
1637                 return;
1638
1639         pcount = vm_page_queues[PQ_ACTIVE + q].lcnt;
1640         fullintervalcount += vm_pageout_stats_interval;
1641         if (fullintervalcount < vm_pageout_full_stats_interval) {
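                     /*
                      * Between full-stats intervals only scan a pro-rated
                      * portion of the queue: vm_pageout_stats_max scaled by
                      * this queue's share of all managed pages.
                      */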
1642                 tpcount = (vm_pageout_stats_max * pcount) /
1643                           vmstats.v_page_count + 1;
1644                 if (pcount > tpcount)
1645                         pcount = tpcount;
1646         } else {
1647                 fullintervalcount = 0;
1648         }
1649
1650         bzero(&marker, sizeof(marker));
1651         marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
1652         marker.queue = PQ_ACTIVE + q;
1653         marker.pc = q;
1654         marker.wire_count = 1;
1655
1656         vm_page_queues_spin_lock(PQ_ACTIVE + q);
1657         TAILQ_INSERT_HEAD(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1658
1659         /*
1660          * Queue locked at top of loop to avoid stack marker issues.
1661          */
1662         while ((m = TAILQ_NEXT(&marker, pageq)) != NULL &&
1663                pcount-- > 0)
1664         {
1665                 int actcount;
1666
1667                 KKASSERT(m->queue == PQ_ACTIVE + q);
1668                 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1669                 TAILQ_INSERT_AFTER(&vm_page_queues[PQ_ACTIVE + q].pl, m,
1670                                    &marker, pageq);
1671
1672                 /*
1673                  * Skip marker pages (atomic against other markers to avoid
1674                  * infinite hop-over scans).
1675                  */
1676                 if (m->flags & PG_MARKER)
1677                         continue;
1678
1679                 /*
1680                  * Ignore pages we can't busy
1681                  */
1682                 if (vm_page_busy_try(m, TRUE))
1683                         continue;
1684
1685                 /*
1686                  * Remaining operations run with the page busy and neither
1687                  * the page nor the queue will be spin-locked.
1688                  */
1689                 vm_page_queues_spin_unlock(PQ_ACTIVE + q);
1690                 KKASSERT(m->queue == PQ_ACTIVE + q);
1691
1692                 /*
1693                  * We now have a safely busied page, the page and queue
1694                  * spinlocks have been released.
1695                  *
1696                  * Ignore held pages
1697                  */
1698                 if (m->hold_count) {
1699                         vm_page_wakeup(m);
1700                         goto next;
1701                 }
1702
1703                 /*
1704                  * Calculate activity
1705                  */
1706                 actcount = 0;
1707                 if (m->flags & PG_REFERENCED) {
1708                         vm_page_flag_clear(m, PG_REFERENCED);
1709                         actcount += 1;
1710                 }
1711                 actcount += pmap_ts_referenced(m);
1712
1713                 /*
1714                  * Update act_count and move page to end of queue.
1715                  */
1716                 if (actcount) {
1717                         m->act_count += ACT_ADVANCE + actcount;
1718                         if (m->act_count > ACT_MAX)
1719                                 m->act_count = ACT_MAX;
1720                         vm_page_and_queue_spin_lock(m);
1721                         if (m->queue - m->pc == PQ_ACTIVE) {
1722                                 TAILQ_REMOVE(
1723                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1724                                         m, pageq);
1725                                 TAILQ_INSERT_TAIL(
1726                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1727                                         m, pageq);
1728                         }
1729                         vm_page_and_queue_spin_unlock(m);
1730                         vm_page_wakeup(m);
1731                         goto next;
1732                 }
1733
1734                 if (m->act_count == 0) {
1735                         /*
1736                          * We turn off page access, so that we have
1737                          * more accurate RSS stats.  We don't do this
1738                          * in the normal page deactivation when the
1739                          * system is under heavy VM load, because the
1740                          * cost of the large number of page protect
1741                          * operations would be higher than the value
1742                          * of doing the operation.
1743                          *
1744                          * We use the marker to save our place so
1745                          * we can release the spin lock.  Both (m)
1746                          * and (next) will be invalid.
1747                          */
1748                         vm_page_protect(m, VM_PROT_NONE);
1749                         vm_page_deactivate(m);
1750                 } else {
1751                         m->act_count -= min(m->act_count, ACT_DECLINE);
1752                         vm_page_and_queue_spin_lock(m);
1753                         if (m->queue - m->pc == PQ_ACTIVE) {
1754                                 TAILQ_REMOVE(
1755                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1756                                         m, pageq);
1757                                 TAILQ_INSERT_TAIL(
1758                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1759                                         m, pageq);
1760                         }
1761                         vm_page_and_queue_spin_unlock(m);
1762                 }
1763                 vm_page_wakeup(m);
1764 next:
1765                 vm_page_queues_spin_lock(PQ_ACTIVE + q);
1766         }
1767
1768         /*
1769          * Remove our local marker
1770          *
1771          * Page queue still spin-locked.
1772          */
1773         TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1774         vm_page_queues_spin_unlock(PQ_ACTIVE + q);
1775 }
1776
1777 static int
1778 vm_pageout_free_page_calc(vm_size_t count)
1779 {
1780         if (count < vmstats.v_page_count)
1781                  return 0;
1782         /*
1783          * free_reserved needs to include enough for the largest swap pager
1784          * structures plus enough for any pv_entry structs when paging.
1785          *
1786          * v_free_min           normal allocations
1787          * v_free_reserved      system allocations
1788          * v_pageout_free_min   allocations by pageout daemon
1789          * v_interrupt_free_min low-level allocations (e.g. swap structures)
1790          */
1791         if (vmstats.v_page_count > 1024)
1792                 vmstats.v_free_min = 64 + (vmstats.v_page_count - 1024) / 200;
1793         else
1794                 vmstats.v_free_min = 64;
1795
1796         /*
1797          * Make sure the vmmeter slop can't blow out our global minimums.
1798          *
1799          * However, to accommodate weird configurations (vkernels with many
1800          * cpus and little memory, or artificially reduced hw.physmem), do
1801          * not allow v_free_min to exceed 1/20 of ram or the pageout daemon
1802          * will go out of control.
1803          */
1804         if (vmstats.v_free_min < VMMETER_SLOP_COUNT * ncpus * 10)
1805                 vmstats.v_free_min = VMMETER_SLOP_COUNT * ncpus * 10;
1806         if (vmstats.v_free_min > vmstats.v_page_count / 20)
1807                 vmstats.v_free_min = vmstats.v_page_count / 20;
1808
1809         vmstats.v_free_reserved = vmstats.v_free_min * 4 / 8 + 7;
1810         vmstats.v_free_severe = vmstats.v_free_min * 4 / 8 + 0;
1811         vmstats.v_pageout_free_min = vmstats.v_free_min * 2 / 8 + 7;
1812         vmstats.v_interrupt_free_min = vmstats.v_free_min * 1 / 8 + 7;
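             /*
              * Illustrative numbers only: with 4K pages and roughly 1G of RAM
              * (v_page_count = 262144), v_free_min = 64 + 261120 / 200 = 1369
              * pages (~5.3M), yielding v_free_reserved = 691, v_free_severe =
              * 684, v_pageout_free_min = 349 and v_interrupt_free_min = 178,
              * assuming the slop and physmem clamps above do not apply.
              */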
1813
1814         return 1;
1815 }
1816
1817
1818 /*
1819  * vm_pageout is the high level pageout daemon.
1820  *
1821  * No requirements.
1822  */
1823 static void
1824 vm_pageout_thread(void)
1825 {
1826         int pass;
1827         int q;
1828         int q1iterator = 0;
1829         int q2iterator = 0;
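             /*
              * q1iterator/q2iterator remember which PQ_L2_SIZE subqueue the
              * previous inactive/active scan stopped at, so successive passes
              * rotate through all subqueues instead of always starting at
              * subqueue 0.
              */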
1830
1831         /*
1832          * Initialize some paging parameters.
1833          */
1834         curthread->td_flags |= TDF_SYSTHREAD;
1835
1836         vm_pageout_free_page_calc(vmstats.v_page_count);
1837
1838         /*
1839          * v_free_target and v_cache_min control pageout hysteresis.  Note
1840          * that these are more a measure of the VM cache queue hysteresis
1841          * than the VM free queue.  Specifically, v_free_target is the
1842          * high water mark (free+cache pages).
1843          *
1844          * v_free_reserved + v_cache_min (mostly means v_cache_min) is the
1845          * low water mark, while v_free_min is the stop.  v_cache_min must
1846          * be big enough to handle memory needs while the pageout daemon
1847          * is signalled and run to free more pages.
1848          */
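             /*
              * Systems with more than 6144 free pages (about 24M worth of
              * 4K pages) get the more aggressive 4x free_min multiplier.
              */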
1849         if (vmstats.v_free_count > 6144)
1850                 vmstats.v_free_target = 4 * vmstats.v_free_min +
1851                                         vmstats.v_free_reserved;
1852         else
1853                 vmstats.v_free_target = 2 * vmstats.v_free_min +
1854                                         vmstats.v_free_reserved;
1855
1856         /*
1857          * NOTE: With the new buffer cache b_act_count we want the default
1858          *       inactive target to be a percentage of available memory.
1859          *
1860          *       The inactive target essentially determines the minimum
1861          *       number of 'temporary' pages capable of caching one-time-use
1862          *       files when the VM system is otherwise full of pages
1863          *       belonging to multi-time-use files or active program data.
1864          *
1865          * NOTE: The inactive target is aggressively pursued only if the
1866          *       inactive queue becomes too small.  If the inactive queue
1867          *       is large enough to satisfy page movement to free+cache
1868          *       then it is repopulated more slowly from the active queue.
1869          *       This allows a general inactive_target default to be set.
1870          *
1871          *       There is an issue here for processes which sit mostly idle
1872          *       'overnight', such as sshd, tcsh, and X.  Any movement from
1873          *       the active queue will eventually cause such pages to be
1874          *       recycled, causing a lot of paging in the morning.  To
1875          *       reduce the incidence of this, pages cycled out of the
1876          *       buffer cache are moved directly to the inactive queue if
1877          *       they were only used once or twice.
1878          *
1879          *       The vfs.vm_cycle_point sysctl can be used to adjust this.
1880          *       Increasing the value (up to 64) increases the number of
1881          *       buffer recyclements which go directly to the inactive queue.
1882          */
1883         if (vmstats.v_free_count > 2048) {
1884                 vmstats.v_cache_min = vmstats.v_free_target;
1885                 vmstats.v_cache_max = 2 * vmstats.v_cache_min;
1886         } else {
1887                 vmstats.v_cache_min = 0;
1888                 vmstats.v_cache_max = 0;
1889         }
1890         vmstats.v_inactive_target = vmstats.v_free_count / 4;
1891
1892         /* XXX does not really belong here */
1893         if (vm_page_max_wired == 0)
1894                 vm_page_max_wired = vmstats.v_free_count / 3;
1895
1896         if (vm_pageout_stats_max == 0)
1897                 vm_pageout_stats_max = vmstats.v_free_target;
1898
1899         /*
1900          * Set interval in seconds for stats scan.
1901          */
1902         if (vm_pageout_stats_interval == 0)
1903                 vm_pageout_stats_interval = 5;
1904         if (vm_pageout_full_stats_interval == 0)
1905                 vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;
1906         
1907
1908         /*
1909          * Set maximum free per pass
1910          */
1911         if (vm_pageout_stats_free_max == 0)
1912                 vm_pageout_stats_free_max = 5;
1913
1914         swap_pager_swap_init();
1915         pass = 0;
1916
1917         /*
1918          * The pageout daemon is never done, so loop forever.
1919          */
1920         while (TRUE) {
1921                 int error;
1922                 int avail_shortage;
1923                 int inactive_shortage;
1924                 int vnodes_skipped = 0;
1925                 int recycle_count = 0;
1926                 int tmp;
1927
1928                 /*
1929                  * Wait for an action request.  If we time out, check to
1930                  * see if paging is needed (in case the normal wakeup
1931                  * code raced us).
1932                  */
1933                 if (vm_pages_needed == 0) {
1934                         error = tsleep(&vm_pages_needed,
1935                                        0, "psleep",
1936                                        vm_pageout_stats_interval * hz);
1937                         if (error &&
1938                             vm_paging_needed() == 0 &&
1939                             vm_pages_needed == 0) {
1940                                 for (q = 0; q < PQ_L2_SIZE; ++q)
1941                                         vm_pageout_page_stats(q);
1942                                 continue;
1943                         }
1944                         vm_pages_needed = 1;
1945                 }
1946
1947                 mycpu->gd_cnt.v_pdwakeups++;
1948
1949                 /*
1950                  * Scan for INACTIVE->CLEAN/PAGEOUT
1951                  *
1952                  * This routine tries to avoid thrashing the system with
1953                  * unnecessary activity.
1954                  *
1955                  * Calculate our target for the number of free+cache pages we
1956                  * want to get to.  This is higher than the number that causes
1957                  * allocations to stall (severe) in order to provide hysteresis,
1958                  * and if we don't make it all the way but get to the minimum
1959                  * we're happy.  Goose it a bit if there are multiple requests
1960                  * for memory.
1961                  *
1962                  * Don't reduce avail_shortage inside the loop or the
1963                  * PQAVERAGE() calculation will break.
1964                  *
1965                  * NOTE! deficit is differentiated from avail_shortage as
1966                  *       REQUIRING at least (deficit) pages to be cleaned,
1967                  *       even if the page queues are in good shape.  This
1968                  *       is used primarily for handling per-process
1969                  *       RLIMIT_RSS and may also see small values when
1970                  *       processes block due to low memory.
1971                  */
1972                 vmstats_rollup();
1973                 avail_shortage = vm_paging_target() + vm_pageout_deficit;
1974                 vm_pageout_deficit = 0;
1975
1976                 if (avail_shortage > 0) {
1977                         int delta = 0;
1978
1979                         for (q = 0; q < PQ_L2_SIZE; ++q) {
1980                                 delta += vm_pageout_scan_inactive(
1981                                             pass,
1982                                             (q + q1iterator) & PQ_L2_MASK,
1983                                             PQAVERAGE(avail_shortage),
1984                                             &vnodes_skipped);
1985                                 if (avail_shortage - delta <= 0)
1986                                         break;
1987                         }
1988                         avail_shortage -= delta;
1989                         q1iterator = q + 1;
1990                 }
1991
1992                 /*
1993                  * Figure out how many active pages we must deactivate.  If
1994                  * we were able to reach our target with just the inactive
1995                  * scan above we limit the number of active pages we
1996                  * deactivate to reduce unnecessary work.
1997                  */
1998                 vmstats_rollup();
1999                 inactive_shortage = vmstats.v_inactive_target -
2000                                     vmstats.v_inactive_count;
2001
2002                 /*
2003                  * If we were unable to free sufficient inactive pages to
2004                  * satisfy the free/cache queue requirements then simply
2005                  * reaching the inactive target may not be good enough.
2006                  * Try to deactivate pages in excess of the target based
2007                  * on the shortfall.
2008                  *
2009          * However, to prevent thrashing the VM system, do not
2010          * deactivate more than an additional 1/10 of the inactive
2011                  * target's worth of active pages.
2012                  */
2013                 if (avail_shortage > 0) {
2014                         tmp = avail_shortage * 2;
2015                         if (tmp > vmstats.v_inactive_target / 10)
2016                                 tmp = vmstats.v_inactive_target / 10;
2017                         inactive_shortage += tmp;
2018                 }
2019
2020                 /*
2021                  * Only trigger a pmap cleanup on inactive shortage.
2022                  */
2023                 if (inactive_shortage > 0) {
2024                         pmap_collect();
2025                 }
2026
2027                 /*
2028                  * Scan for ACTIVE->INACTIVE
2029                  *
2030                  * Only trigger on inactive shortage.  Triggering on
2031                  * avail_shortage can starve the active queue with
2032                  * unnecessary active->inactive transitions and destroy
2033                  * performance.
2034                  */
2035                 if (/*avail_shortage > 0 ||*/ inactive_shortage > 0) {
2036                         int delta = 0;
2037
2038                         for (q = 0; q < PQ_L2_SIZE; ++q) {
2039                                 delta += vm_pageout_scan_active(
2040                                                 pass,
2041                                                 (q + q2iterator) & PQ_L2_MASK,
2042                                                 PQAVERAGE(avail_shortage),
2043                                                 PQAVERAGE(inactive_shortage),
2044                                                 &recycle_count);
2045                                 if (inactive_shortage - delta <= 0 &&
2046                                     avail_shortage - delta <= 0) {
2047                                         break;
2048                                 }
2049                         }
2050                         inactive_shortage -= delta;
2051                         avail_shortage -= delta;
2052                         q2iterator = q + 1;
2053                 }
2054
2055                 /*
2056                  * Scan for CACHE->FREE
2057                  *
2058                  * Finally free enough cache pages to meet our free page
2059                  * requirement and take more drastic measures if we are
2060                  * still in trouble.
2061                  */
2062                 vmstats_rollup();
2063                 vm_pageout_scan_cache(avail_shortage, pass,
2064                                       vnodes_skipped, recycle_count);
2065
2066                 /*
2067                  * Wait for more work.
2068                  */
2069                 if (avail_shortage > 0) {
2070                         ++pass;
2071                         if (pass < 10 && vm_pages_needed > 1) {
2072                                 /*
2073                                  * Normal operation, additional processes
2074                                  * have already kicked us.  Retry immediately
2075                                  * unless swap space is completely full in
2076                                  * which case delay a bit.
2077                                  */
2078                                 if (swap_pager_full) {
2079                                         tsleep(&vm_pages_needed, 0, "pdelay",
2080                                                 hz / 5);
2081                                 } /* else immediate retry */
2082                         } else if (pass < 10) {
2083                                 /*
2084                                  * Normal operation, fewer processes.  Delay
2085                                  * a bit but allow wakeups.
2086                                  */
2087                                 vm_pages_needed = 0;
2088                                 tsleep(&vm_pages_needed, 0, "pdelay", hz / 10);
2089                                 vm_pages_needed = 1;
2090                         } else if (swap_pager_full == 0) {
2091                                 /*
2092                                  * We've taken too many passes, forced delay.
2093                                  */
2094                                 tsleep(&vm_pages_needed, 0, "pdelay", hz / 10);
2095                         } else {
2096                                 /*
2097                                  * Running out of memory, catastrophic
2098                                  * back-off to one-second intervals.
2099                                  */
2100                                 tsleep(&vm_pages_needed, 0, "pdelay", hz);
2101                         }
2102                 } else if (vm_pages_needed) {
2103                         /*
2104                          * Interlocked wakeup of waiters (non-optional).
2105                          *
2106                          * Similar to vm_page_free_wakeup() in vm_page.c, wake
2107                          * up anyone sleeping on v_free_count.
2108                          */
2109                         pass = 0;
2110                         if (!vm_page_count_min(vm_page_free_hysteresis) ||
2111                             !vm_page_count_target()) {
2112                                 vm_pages_needed = 0;
2113                                 wakeup(&vmstats.v_free_count);
2114                         }
2115                 } else {
2116                         pass = 0;
2117                 }
2118         }
2119 }
2120
2121 static struct kproc_desc page_kp = {
2122         "pagedaemon",
2123         vm_pageout_thread,
2124         &pagethread
2125 };
2126 SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp);
2127
2128
2129 /*
2130  * Called after allocating a page out of the cache or free queue
2131  * to possibly wake the pagedaemon up to replenish our supply.
2132  *
2133  * We try to generate some hysteresis by waking the pagedaemon up
2134  * when our free+cache pages go below the free_min+cache_min level.
2135  * The pagedaemon tries to get the count back up to at least the
2136  * minimum, and through to the target level if possible.
2137  *
2138  * If the pagedaemon is already active bump vm_pages_needed as a hint
2139  * that there are even more requests pending.
2140  *
2141  * SMP races ok?
2142  * No requirements.
2143  */
2144 void
2145 pagedaemon_wakeup(void)
2146 {
2147         if (vm_paging_needed() && curthread != pagethread) {
2148                 if (vm_pages_needed == 0) {
2149                         vm_pages_needed = 1;    /* SMP race ok */
2150                         wakeup(&vm_pages_needed);
2151                 } else if (vm_page_count_min(0)) {
2152                         ++vm_pages_needed;      /* SMP race ok */
2153                 }
2154         }
2155 }
2156
2157 #if !defined(NO_SWAPPING)
2158
2159 /*
2160  * SMP races ok?
2161  * No requirements.
2162  */
2163 static void
2164 vm_req_vmdaemon(void)
2165 {
2166         static int lastrun = 0;
2167
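             /*
              * Rate-limit vmdaemon wakeups to roughly one per second.  The
              * second comparison handles the tick counter wrapping around.
              */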
2168         if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
2169                 wakeup(&vm_daemon_needed);
2170                 lastrun = ticks;
2171         }
2172 }
2173
2174 static int vm_daemon_callback(struct proc *p, void *data __unused);
2175
2176 /*
2177  * No requirements.
2178  */
2179 static void
2180 vm_daemon(void)
2181 {
2182         int req_swapout;
2183
2184         while (TRUE) {
2185                 tsleep(&vm_daemon_needed, 0, "psleep", 0);
2186                 req_swapout = atomic_swap_int(&vm_pageout_req_swapout, 0);
2187
2188                 /*
2189                  * forced swapouts
2190                  */
2191                 if (req_swapout)
2192                         swapout_procs(vm_pageout_req_swapout);
2193
2194                 /*
2195                  * Scan processes that exceed their RSS rlimit or are
2196                  * swapped out, and deactivate their pages.
2197                  */
2198                 allproc_scan(vm_daemon_callback, NULL);
2199         }
2200 }
2201
2202 static int
2203 vm_daemon_callback(struct proc *p, void *data __unused)
2204 {
2205         struct vmspace *vm;
2206         vm_pindex_t limit, size;
2207
2208         /*
2209          * If this is a system process or it is already exiting,
2210          * skip it.
2211          */
2212         lwkt_gettoken(&p->p_token);
2213
2214         if (p->p_flags & (P_SYSTEM | P_WEXIT)) {
2215                 lwkt_reltoken(&p->p_token);
2216                 return (0);
2217         }
2218
2219         /*
2220          * if the process is in a non-running type state,
2221          * don't touch it.
2222          */
2223         if (p->p_stat != SACTIVE && p->p_stat != SSTOP && p->p_stat != SCORE) {
2224                 lwkt_reltoken(&p->p_token);
2225                 return (0);
2226         }
2227
2228         /*
2229          * Get the process's RSS limit, in pages.
2230          */
2231         limit = OFF_TO_IDX(qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
2232                                 p->p_rlimit[RLIMIT_RSS].rlim_max));
2233
2234         /*
2235          * let processes that are swapped out really be
2236          * swapped out.  Set the limit to nothing to get as
2237          * many pages out to swap as possible.
2238          */
2239         if (p->p_flags & P_SWAPPEDOUT)
2240                 limit = 0;
2241
2242         vm = p->p_vmspace;
2243         vmspace_hold(vm);
2244         size = pmap_resident_tlnw_count(&vm->vm_pmap);
2245         if (limit >= 0 && size > 4096 &&
2246             size - 4096 >= limit && vm_pageout_memuse_mode >= 1) {
2247                 vm_pageout_map_deactivate_pages(&vm->vm_map, limit);
2248         }
2249         vmspace_drop(vm);
2250
2251         lwkt_reltoken(&p->p_token);
2252
2253         return (0);
2254 }
2255
2256 #endif