/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: @(#)vm_pageout.c  7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_pageout.c,v 1.151.2.15 2002/12/29 18:21:04 dillon Exp $
 */

/*
 *      The proverbial page-out daemon.
 */

#include "opt_vm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <vm/vm_page2.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout" */
static int vm_pageout_clean (vm_page_t);
static int vm_pageout_free_page_calc (vm_size_t count);
struct thread *pagethread;

#if !defined(NO_SWAPPING)
/* the kernel process "vm_daemon" */
static void vm_daemon (void);
static struct   thread *vmthread;

static struct kproc_desc vm_kp = {
        "vmdaemon",
        vm_daemon,
        &vmthread
};
SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp);
#endif

int vm_pages_needed=0;          /* Event on which pageout daemon sleeps */
int vm_pageout_deficit=0;       /* Estimated number of pages deficit */
int vm_pageout_pages_needed=0;  /* pageout daemon needs pages */
int vm_page_free_hysteresis = 16;

#if !defined(NO_SWAPPING)
static int vm_pageout_req_swapout;      /* XXX */
static int vm_daemon_needed;
#endif
static int vm_max_launder = 4096;
static int vm_pageout_stats_max=0, vm_pageout_stats_interval = 0;
static int vm_pageout_full_stats_interval = 0;
static int vm_pageout_stats_free_max=0, vm_pageout_algorithm=0;
static int defer_swap_pageouts=0;
static int disable_swap_pageouts=0;
static u_int vm_anonmem_decline = ACT_DECLINE;
static u_int vm_filemem_decline = ACT_DECLINE * 2;

#if defined(NO_SWAPPING)
static int vm_swap_enabled=0;
static int vm_swap_idle_enabled=0;
#else
static int vm_swap_enabled=1;
static int vm_swap_idle_enabled=0;
#endif

SYSCTL_UINT(_vm, VM_PAGEOUT_ALGORITHM, anonmem_decline,
        CTLFLAG_RW, &vm_anonmem_decline, 0, "active->inactive anon memory");

SYSCTL_UINT(_vm, VM_PAGEOUT_ALGORITHM, filemem_decline,
        CTLFLAG_RW, &vm_filemem_decline, 0, "active->inactive file cache");

SYSCTL_INT(_vm, OID_AUTO, page_free_hysteresis,
        CTLFLAG_RW, &vm_page_free_hysteresis, 0,
        "Free more pages than the minimum required");

SYSCTL_INT(_vm, OID_AUTO, max_launder,
        CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
        CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");

SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
        CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
        CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
        CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "Not implemented");

#if defined(NO_SWAPPING)
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
        CTLFLAG_RD, &vm_swap_enabled, 0, "");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
        CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
#else
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
        CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
        CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
#endif

SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
        CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");

SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
        CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");

static int pageout_lock_miss;
SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
        CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");

int vm_page_max_wired;          /* XXX max # of wired pages system-wide */

#if !defined(NO_SWAPPING)
typedef void freeer_fcn_t (vm_map_t, vm_object_t, vm_pindex_t, int);
static void vm_pageout_map_deactivate_pages (vm_map_t, vm_pindex_t);
static freeer_fcn_t vm_pageout_object_deactivate_pages;
static void vm_req_vmdaemon (void);
#endif
static void vm_pageout_page_stats(int q);

/*
 * Calculate approximately how many pages on each queue to try to
 * clean.  An exact calculation creates an edge condition when the
 * queues are unbalanced so add significant slop.  The queue scans
 * will stop early when targets are reached and will start where they
 * left off on the next pass.
 */
static __inline int
PQAVERAGE(int n)
{
        int avg;

        if (n >= 0) {
                avg = ((n + (PQ_L2_SIZE - 1)) / PQ_L2_SIZE + 1);
                avg += avg / 2 + 1;
        } else {
                avg = ((n - (PQ_L2_SIZE - 1)) / PQ_L2_SIZE - 1);
                avg += avg / 2 - 1;
        }
        return avg;
}

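/*
 * Worked example (illustrative, assuming PQ_L2_SIZE is 256): for a
 * shortage of n = 1000 pages, integer math gives
 *
 *      avg = (1000 + 255) / 256 + 1 = 5;  avg += 5 / 2 + 1  ->  avg = 8
 *
 * so each of the 256 queues is asked for ~8 pages (2048 total) rather
 * than an exact 1000/256 ~= 4, providing the slop described above.
 */
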
/*
 * vm_pageout_clean:
 *
 * Clean the page and remove it from the laundry.  The page must not be
 * busy on-call.
 *
 * We set the busy bit to cause potential page faults on this page to
 * block.  Note the careful timing, however, the busy bit isn't set till
 * late and we cannot do anything that will mess with the page.
 */
static int
vm_pageout_clean(vm_page_t m)
{
        vm_object_t object;
        vm_page_t mc[BLIST_MAX_ALLOC];
        int error;
        int ib, is, page_base;
        vm_pindex_t pindex = m->pindex;

        object = m->object;

        /*
         * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
         * with the new swapper, but we could have serious problems paging
         * out other object types if there is insufficient memory.
         *
         * Unfortunately, checking free memory here is far too late, so the
         * check has been moved up a procedural level.
         */

        /*
         * Don't mess with the page if it's busy, held, or special
         *
         * XXX do we really need to check hold_count here?  hold_count
         * isn't supposed to mess with vm_page ops except prevent the
         * page from being reused.
         */
        if (m->hold_count != 0 || (m->flags & PG_UNMANAGED)) {
                vm_page_wakeup(m);
                return 0;
        }

        /*
         * Place page in cluster.  Align cluster for optimal swap space
         * allocation (whether it is swap or not).  This is typically ~16-32
         * pages, which also tends to align the cluster to multiples of the
         * filesystem block size if backed by a filesystem.
         */
        page_base = pindex % BLIST_MAX_ALLOC;
        mc[page_base] = m;
        ib = page_base - 1;
        is = page_base + 1;

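        /*
         * Illustrative example (assuming BLIST_MAX_ALLOC is 16): for
         * pindex 37, page_base = 37 % 16 = 5, so m occupies mc[5].
         * The backward scan below tries to fill mc[4]..mc[0] with
         * pindex 36..32 and the forward scan mc[6]..mc[15] with
         * pindex 38..47, yielding a cluster aligned to a 16-page
         * boundary.
         */
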
        /*
         * Scan object for clusterable pages.
         *
         * We can cluster ONLY if the page is NOT
         * clean, wired, busy, held, or mapped into a
         * buffer, and one of the following:
         * 1) The page is inactive, or a seldom used
         *    active page.
         * -or-
         * 2) we force the issue.
         *
         * During heavy mmap/modification loads the pageout
         * daemon can really fragment the underlying file
         * due to flushing pages out of order and not trying
         * to align the clusters (which leaves sporadic
         * out-of-order holes).  To solve this problem we do
         * the reverse scan first and attempt to align our
         * cluster, then do a forward scan if room remains.
         */
        vm_object_hold(object);
        while (ib >= 0) {
                vm_page_t p;

                p = vm_page_lookup_busy_try(object, pindex - page_base + ib,
                                            TRUE, &error);
                if (error || p == NULL)
                        break;
                if ((p->queue - p->pc) == PQ_CACHE ||
                    (p->flags & PG_UNMANAGED)) {
                        vm_page_wakeup(p);
                        break;
                }
                vm_page_test_dirty(p);
                if (((p->dirty & p->valid) == 0 &&
                     (p->flags & PG_NEED_COMMIT) == 0) ||
                    p->queue - p->pc != PQ_INACTIVE ||
                    p->wire_count != 0 ||       /* may be held by buf cache */
                    p->hold_count != 0) {       /* may be undergoing I/O */
                        vm_page_wakeup(p);
                        break;
                }
                mc[ib] = p;
                --ib;
        }
        ++ib;   /* fixup */

        while (is < BLIST_MAX_ALLOC &&
               pindex - page_base + is < object->size) {
                vm_page_t p;

                p = vm_page_lookup_busy_try(object, pindex - page_base + is,
                                            TRUE, &error);
                if (error || p == NULL)
                        break;
                if (((p->queue - p->pc) == PQ_CACHE) ||
                    (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
                        vm_page_wakeup(p);
                        break;
                }
                vm_page_test_dirty(p);
                if (((p->dirty & p->valid) == 0 &&
                     (p->flags & PG_NEED_COMMIT) == 0) ||
                    p->queue - p->pc != PQ_INACTIVE ||
                    p->wire_count != 0 ||       /* may be held by buf cache */
                    p->hold_count != 0) {       /* may be undergoing I/O */
                        vm_page_wakeup(p);
                        break;
                }
                mc[is] = p;
                ++is;
        }

        vm_object_drop(object);

        /*
         * we allow reads during pageouts...
         */
        return vm_pageout_flush(&mc[ib], is - ib, 0);
}

/*
 * vm_pageout_flush() - launder the given pages
 *
 *      The given pages are laundered.  Note that we setup for the start of
 *      I/O ( i.e. busy the page ), mark it read-only, and bump the object
 *      reference count all in here rather than in the parent.  If we want
 *      the parent to do more sophisticated things we may have to change
 *      the ordering.
 *
 *      The pages in the array must be busied by the caller and will be
 *      unbusied by this function.
 */
int
vm_pageout_flush(vm_page_t *mc, int count, int flags)
{
        vm_object_t object;
        int pageout_status[count];
        int numpagedout = 0;
        int i;

        /*
         * Initiate I/O.  Bump the vm_page_t->busy counter.
         */
        for (i = 0; i < count; i++) {
                KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
                        ("vm_pageout_flush page %p index %d/%d: partially "
                         "invalid page", mc[i], i, count));
                vm_page_io_start(mc[i]);
        }

        /*
         * We must make the pages read-only.  This will also force the
         * modified bit in the related pmaps to be cleared.  The pager
         * cannot clear the bit for us since the I/O completion code
         * typically runs from an interrupt.  The act of making the page
         * read-only handles the case for us.
         *
         * Then we can unbusy the pages; we still hold a reference by virtue
         * of our soft-busy.
         */
        for (i = 0; i < count; i++) {
                vm_page_protect(mc[i], VM_PROT_READ);
                vm_page_wakeup(mc[i]);
        }

        object = mc[0]->object;
        vm_object_pip_add(object, count);

        vm_pager_put_pages(object, mc, count,
            (flags | ((object == &kernel_object) ? VM_PAGER_PUT_SYNC : 0)),
            pageout_status);

        for (i = 0; i < count; i++) {
                vm_page_t mt = mc[i];

                switch (pageout_status[i]) {
                case VM_PAGER_OK:
                        numpagedout++;
                        break;
                case VM_PAGER_PEND:
                        numpagedout++;
                        break;
                case VM_PAGER_BAD:
                        /*
                         * Page outside of range of object. Right now we
                         * essentially lose the changes by pretending it
                         * worked.
                         */
                        vm_page_busy_wait(mt, FALSE, "pgbad");
                        pmap_clear_modify(mt);
                        vm_page_undirty(mt);
                        vm_page_wakeup(mt);
                        break;
                case VM_PAGER_ERROR:
                case VM_PAGER_FAIL:
                        /*
                         * A page typically cannot be paged out when we
                         * have run out of swap.  We leave the page
                         * marked inactive and will try to page it out
                         * again later.
                         *
                         * Starvation of the active page list is used to
                         * determine when the system is massively memory
                         * starved.
                         */
                        break;
                case VM_PAGER_AGAIN:
                        break;
                }

                /*
                 * If the operation is still going, leave the page busy to
                 * block all other accesses. Also, leave the paging in
                 * progress indicator set so that we don't attempt an object
                 * collapse.
                 *
                 * For any pages which have completed synchronously,
                 * deactivate the page if we are under a severe deficit.
                 * Do not try to enter them into the cache, though, they
                 * might still be read-heavy.
                 */
                if (pageout_status[i] != VM_PAGER_PEND) {
                        vm_page_busy_wait(mt, FALSE, "pgouw");
                        if (vm_page_count_severe())
                                vm_page_deactivate(mt);
#if 0
                        if (!vm_page_count_severe() || !vm_page_try_to_cache(mt))
                                vm_page_protect(mt, VM_PROT_READ);
#endif
                        vm_page_io_finish(mt);
                        vm_page_wakeup(mt);
                        vm_object_pip_wakeup(object);
                }
        }
        return numpagedout;
}

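/*
 * Hypothetical usage sketch (not from the tree; vm_pageout_clean() above
 * is the real in-tree caller and builds an aligned cluster first):
 *
 *      vm_page_busy_wait(m, FALSE, "exflsh");  // caller busies the page
 *      n = vm_pageout_flush(&m, 1, 0);         // returns pages laundered;
 *                                              // the page is unbusied
 *
 * The "exflsh" wmesg is illustrative only.
 */
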
#if !defined(NO_SWAPPING)
/*
 * Deactivate enough pages to satisfy the inactive target requirements,
 * or, if vm_page_proc_limit is set, deactivate all of the pages in the
 * object and its backing_objects.
 *
 * The map must be locked.
 * The caller must hold the vm_object.
 */
static int vm_pageout_object_deactivate_pages_callback(vm_page_t, void *);

static void
vm_pageout_object_deactivate_pages(vm_map_t map, vm_object_t object,
                                   vm_pindex_t desired, int map_remove_only)
{
        struct rb_vm_page_scan_info info;
        vm_object_t lobject;
        vm_object_t tobject;
        int remove_mode;

        ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
        lobject = object;

        while (lobject) {
                if (pmap_resident_count(vm_map_pmap(map)) <= desired)
                        break;
                if (lobject->type == OBJT_DEVICE ||
                    lobject->type == OBJT_MGTDEVICE ||
                    lobject->type == OBJT_PHYS)
                        break;
                if (lobject->paging_in_progress)
                        break;

                remove_mode = map_remove_only;
                if (lobject->shadow_count > 1)
                        remove_mode = 1;

                /*
                 * scan the object's entire memory queue.  We hold the
                 * object's token so the scan should not race anything.
                 */
                info.limit = remove_mode;
                info.map = map;
                info.desired = desired;
                vm_page_rb_tree_RB_SCAN(&lobject->rb_memq, NULL,
                                vm_pageout_object_deactivate_pages_callback,
                                &info
                );
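                /*
                 * Descriptive note for the loop below: we advance to the
                 * backing object with a hold-then-recheck pattern, since
                 * vm_object_hold() can block; if backing_object changed
                 * while we slept we drop the stale hold and retry.
                 */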
                while ((tobject = lobject->backing_object) != NULL) {
                        KKASSERT(tobject != object);
                        vm_object_hold(tobject);
                        if (tobject == lobject->backing_object)
                                break;
                        vm_object_drop(tobject);
                }
                if (lobject != object) {
                        if (tobject)
                                vm_object_lock_swap();
                        vm_object_drop(lobject);
                        /* leaves tobject locked & at top */
                }
                lobject = tobject;
        }
        if (lobject != object)
                vm_object_drop(lobject);        /* NULL ok */
}

/*
 * The caller must hold the vm_object.
 */
static int
vm_pageout_object_deactivate_pages_callback(vm_page_t p, void *data)
{
        struct rb_vm_page_scan_info *info = data;
        int actcount;

        if (pmap_resident_count(vm_map_pmap(info->map)) <= info->desired) {
                return(-1);
        }
        mycpu->gd_cnt.v_pdpages++;

        if (vm_page_busy_try(p, TRUE))
                return(0);
        if (p->wire_count || p->hold_count || (p->flags & PG_UNMANAGED)) {
                vm_page_wakeup(p);
                return(0);
        }
        if (!pmap_page_exists_quick(vm_map_pmap(info->map), p)) {
                vm_page_wakeup(p);
                return(0);
        }

        actcount = pmap_ts_referenced(p);
        if (actcount) {
                vm_page_flag_set(p, PG_REFERENCED);
        } else if (p->flags & PG_REFERENCED) {
                actcount = 1;
        }

        vm_page_and_queue_spin_lock(p);
        if (p->queue - p->pc != PQ_ACTIVE && (p->flags & PG_REFERENCED)) {
                vm_page_and_queue_spin_unlock(p);
                vm_page_activate(p);
                p->act_count += actcount;
                vm_page_flag_clear(p, PG_REFERENCED);
        } else if (p->queue - p->pc == PQ_ACTIVE) {
                if ((p->flags & PG_REFERENCED) == 0) {
                        p->act_count -= min(p->act_count, ACT_DECLINE);
                        if (!info->limit &&
                            (vm_pageout_algorithm || (p->act_count == 0))) {
                                vm_page_and_queue_spin_unlock(p);
                                vm_page_protect(p, VM_PROT_NONE);
                                vm_page_deactivate(p);
                        } else {
                                TAILQ_REMOVE(&vm_page_queues[p->queue].pl,
                                             p, pageq);
                                TAILQ_INSERT_TAIL(&vm_page_queues[p->queue].pl,
                                                  p, pageq);
                                vm_page_and_queue_spin_unlock(p);
                        }
                } else {
                        vm_page_and_queue_spin_unlock(p);
                        vm_page_activate(p);
                        vm_page_flag_clear(p, PG_REFERENCED);

                        vm_page_and_queue_spin_lock(p);
                        if (p->queue - p->pc == PQ_ACTIVE) {
                                if (p->act_count < (ACT_MAX - ACT_ADVANCE))
                                        p->act_count += ACT_ADVANCE;
                                TAILQ_REMOVE(&vm_page_queues[p->queue].pl,
                                             p, pageq);
                                TAILQ_INSERT_TAIL(&vm_page_queues[p->queue].pl,
                                                  p, pageq);
                        }
                        vm_page_and_queue_spin_unlock(p);
                }
        } else if (p->queue - p->pc == PQ_INACTIVE) {
                vm_page_and_queue_spin_unlock(p);
                vm_page_protect(p, VM_PROT_NONE);
        } else {
                vm_page_and_queue_spin_unlock(p);
        }
        vm_page_wakeup(p);
        return(0);
}

/*
 * Deactivate some number of pages in a map, try to do it fairly, but
 * that is really hard to do.
 */
static void
vm_pageout_map_deactivate_pages(vm_map_t map, vm_pindex_t desired)
{
        vm_map_entry_t tmpe;
        vm_object_t obj, bigobj;
        int nothingwired;

        if (lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT)) {
                return;
        }

        bigobj = NULL;
        nothingwired = TRUE;

        /*
         * first, search out the biggest object, and try to free pages from
         * that.
         */
        tmpe = map->header.next;
        while (tmpe != &map->header) {
                switch(tmpe->maptype) {
                case VM_MAPTYPE_NORMAL:
                case VM_MAPTYPE_VPAGETABLE:
                        obj = tmpe->object.vm_object;
                        if ((obj != NULL) && (obj->shadow_count <= 1) &&
                                ((bigobj == NULL) ||
                                 (bigobj->resident_page_count < obj->resident_page_count))) {
                                bigobj = obj;
                        }
                        break;
                default:
                        break;
                }
                if (tmpe->wired_count > 0)
                        nothingwired = FALSE;
                tmpe = tmpe->next;
        }

        if (bigobj) {
                vm_object_hold(bigobj);
                vm_pageout_object_deactivate_pages(map, bigobj, desired, 0);
                vm_object_drop(bigobj);
        }

        /*
         * Next, hunt around for other pages to deactivate.  We actually
         * do this search sort of wrong -- .text first is not the best idea.
         */
        tmpe = map->header.next;
        while (tmpe != &map->header) {
                if (pmap_resident_count(vm_map_pmap(map)) <= desired)
                        break;
                switch(tmpe->maptype) {
                case VM_MAPTYPE_NORMAL:
                case VM_MAPTYPE_VPAGETABLE:
                        obj = tmpe->object.vm_object;
                        if (obj) {
                                vm_object_hold(obj);
                                vm_pageout_object_deactivate_pages(map, obj, desired, 0);
                                vm_object_drop(obj);
                        }
                        break;
                default:
                        break;
                }
                tmpe = tmpe->next;
        }

        /*
         * Remove all mappings if a process is swapped out; this will free
         * page table pages.
         */
        if (desired == 0 && nothingwired)
                pmap_remove(vm_map_pmap(map),
                            VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
        vm_map_unlock(map);
}
#endif

/*
 * Called when the pageout scan wants to free a page.  We no longer
 * try to cycle the vm_object here with a reference & dealloc, which can
 * cause a non-trivial object collapse in a critical path.
 *
 * It is unclear why we cycled the ref_count in the past, perhaps to try
 * to optimize shadow chain collapses but I don't quite see why it would
 * be necessary.  An OBJ_DEAD object should terminate any and all vm_pages
 * synchronously and not have to be kick-started.
 */
static void
vm_pageout_page_free(vm_page_t m)
{
        vm_page_protect(m, VM_PROT_NONE);
        vm_page_free(m);
}

/*
 * vm_pageout_scan does the dirty work for the pageout daemon.
 */
struct vm_pageout_scan_info {
        struct proc *bigproc;
        vm_offset_t bigsize;
};

static int vm_pageout_scan_callback(struct proc *p, void *data);

static int
vm_pageout_scan_inactive(int pass, int q, int avail_shortage,
                         int *vnodes_skippedp)
{
        vm_page_t m;
        struct vm_page marker;
        struct vnode *vpfailed;         /* warning, allowed to be stale */
        int maxscan;
        int count;
        int delta = 0;
        vm_object_t object;
        int actcount;
        int maxlaunder;

        /*
         * Start scanning the inactive queue for pages we can move to the
         * cache or free.  The scan will stop when the target is reached or
         * we have scanned the entire inactive queue.  Note that m->act_count
         * is not used to form decisions for the inactive queue, only for the
         * active queue.
         *
         * maxlaunder limits the number of dirty pages we flush per scan.
         * For most systems a smaller value (16 or 32) is more robust under
         * extreme memory and disk pressure because any unnecessary writes
         * to disk can result in extreme performance degradation.  However,
         * systems with excessive dirty pages (especially when MAP_NOSYNC is
         * used) will die horribly with limited laundering.  If the pageout
         * daemon cannot clean enough pages in the first pass, we let it go
         * all out in succeeding passes.
         */
        if ((maxlaunder = vm_max_launder) <= 1)
                maxlaunder = 1;
        if (pass)
                maxlaunder = 10000;

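        /*
         * Illustration: with the default vm_max_launder of 4096 the first
         * pass may flush up to 4096 dirty pages; tuning vm.max_launder
         * down to e.g. 32 limits pass 0 to 32 flushes, while any later
         * pass effectively removes the cap (10000).
         */
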
        /*
         * Initialize our marker
         */
        bzero(&marker, sizeof(marker));
        marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
        marker.queue = PQ_INACTIVE + q;
        marker.pc = q;
        marker.wire_count = 1;

        /*
         * Inactive queue scan.
         *
         * NOTE: The vm_page must be spinlocked before the queue to avoid
         *       deadlocks, so it is easiest to simply iterate the loop
         *       with the queue unlocked at the top.
         */
        vpfailed = NULL;

        vm_page_queues_spin_lock(PQ_INACTIVE + q);
        TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE + q].pl, &marker, pageq);
        maxscan = vm_page_queues[PQ_INACTIVE + q].lcnt;

        /*
         * Queue locked at top of loop to avoid stack marker issues.
         */
        while ((m = TAILQ_NEXT(&marker, pageq)) != NULL &&
               maxscan-- > 0 && avail_shortage - delta > 0)
        {
                KKASSERT(m->queue - m->pc == PQ_INACTIVE);
                TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE + q].pl,
                             &marker, pageq);
                TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE + q].pl, m,
                                   &marker, pageq);
                mycpu->gd_cnt.v_pdpages++;

                /*
                 * Skip marker pages (atomic against other markers to avoid
                 * infinite hop-over scans).
                 */
                if (m->flags & PG_MARKER)
                        continue;

                /*
                 * Try to busy the page.  Don't mess with pages which are
                 * already busy or reorder them in the queue.
                 */
                if (vm_page_busy_try(m, TRUE))
                        continue;

                /*
                 * Remaining operations run with the page busy and neither
                 * the page or the queue will be spin-locked.
                 */
                vm_page_queues_spin_unlock(PQ_INACTIVE + q);
                KKASSERT(m->queue - m->pc == PQ_INACTIVE);
                lwkt_yield();

                /*
                 * It is possible for a page to be busied ad-hoc (e.g. the
                 * pmap_collect() code) and wired and race against the
                 * allocation of a new page.  vm_page_alloc() may be forced
                 * to deactivate the wired page in which case it winds up
                 * on the inactive queue and must be handled here.  We
                 * correct the problem simply by unqueuing the page.
                 */
                if (m->wire_count) {
                        vm_page_unqueue_nowakeup(m);
                        vm_page_wakeup(m);
                        kprintf("WARNING: pagedaemon: wired page on "
                                "inactive queue %p\n", m);
                        goto next;
                }

                /*
                 * A held page may be undergoing I/O, so skip it.
                 */
                if (m->hold_count) {
                        vm_page_and_queue_spin_lock(m);
                        if (m->queue - m->pc == PQ_INACTIVE) {
                                TAILQ_REMOVE(
                                        &vm_page_queues[PQ_INACTIVE + q].pl,
                                        m, pageq);
                                TAILQ_INSERT_TAIL(
                                        &vm_page_queues[PQ_INACTIVE + q].pl,
                                        m, pageq);
                                ++vm_swapcache_inactive_heuristic;
                        }
                        vm_page_and_queue_spin_unlock(m);
                        vm_page_wakeup(m);
                        goto next;
                }

                if (m->object == NULL || m->object->ref_count == 0) {
                        /*
                         * If the object is not being used, we ignore previous
                         * references.
                         */
                        vm_page_flag_clear(m, PG_REFERENCED);
                        pmap_clear_reference(m);
                        /* fall through to end */
                } else if (((m->flags & PG_REFERENCED) == 0) &&
                            (actcount = pmap_ts_referenced(m))) {
                        /*
                         * Otherwise, if the page has been referenced while
                         * in the inactive queue, we bump the "activation
                         * count" upwards, making it less likely that the
                         * page will be added back to the inactive queue
                         * prematurely again.  Here we check the page tables
                         * (or emulated bits, if any), since the upper level
                         * VM system knows nothing about existing
                         * references.
                         */
                        vm_page_activate(m);
                        m->act_count += (actcount + ACT_ADVANCE);
                        vm_page_wakeup(m);
                        goto next;
                }

                /*
                 * (m) is still busied.
                 *
                 * If the upper level VM system knows about any page
                 * references, we activate the page.  We also set the
                 * "activation count" higher than normal so that we will be
                 * less likely to place pages back onto the inactive queue
                 * again.
                 */
                if ((m->flags & PG_REFERENCED) != 0) {
                        vm_page_flag_clear(m, PG_REFERENCED);
                        actcount = pmap_ts_referenced(m);
                        vm_page_activate(m);
                        m->act_count += (actcount + ACT_ADVANCE + 1);
                        vm_page_wakeup(m);
                        goto next;
                }

                /*
                 * If the upper level VM system doesn't know anything about
                 * the page being dirty, we have to check for it again.  As
                 * far as the VM code knows, any partially dirty pages are
                 * fully dirty.
                 *
                 * Pages marked PG_WRITEABLE may be mapped into the user
                 * address space of a process running on another cpu.  A
                 * user process (without holding the MP lock) running on
                 * another cpu may be able to touch the page while we are
                 * trying to remove it.  vm_page_cache() will handle this
                 * case for us.
                 */
                if (m->dirty == 0) {
                        vm_page_test_dirty(m);
                } else {
                        vm_page_dirty(m);
                }

                if (m->valid == 0 && (m->flags & PG_NEED_COMMIT) == 0) {
                        /*
                         * Invalid pages can be easily freed
                         */
                        vm_pageout_page_free(m);
                        mycpu->gd_cnt.v_dfree++;
                        ++delta;
                } else if (m->dirty == 0 && (m->flags & PG_NEED_COMMIT) == 0) {
                        /*
                         * Clean pages can be placed onto the cache queue.
                         * This effectively frees them.
                         */
                        vm_page_cache(m);
                        ++delta;
                } else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) {
                        /*
                         * Dirty pages need to be paged out, but flushing
                         * a page is extremely expensive versus freeing
                         * a clean page.  Rather than artificially limiting
                         * the number of pages we can flush, we instead give
                         * dirty pages extra priority on the inactive queue
                         * by forcing them to be cycled through the queue
                         * twice before being flushed, after which the
                         * (now clean) page will cycle through once more
                         * before being freed.  This significantly extends
                         * the thrash point for a heavily loaded machine.
                         */
                        vm_page_flag_set(m, PG_WINATCFLS);
                        vm_page_and_queue_spin_lock(m);
                        if (m->queue - m->pc == PQ_INACTIVE) {
                                TAILQ_REMOVE(
                                        &vm_page_queues[PQ_INACTIVE + q].pl,
                                        m, pageq);
                                TAILQ_INSERT_TAIL(
                                        &vm_page_queues[PQ_INACTIVE + q].pl,
                                        m, pageq);
                                ++vm_swapcache_inactive_heuristic;
                        }
                        vm_page_and_queue_spin_unlock(m);
                        vm_page_wakeup(m);
                } else if (maxlaunder > 0) {
                        /*
                         * We always want to try to flush some dirty pages if
                         * we encounter them, to keep the system stable.
                         * Normally this number is small, but under extreme
                         * pressure where there are insufficient clean pages
                         * on the inactive queue, we may have to go all out.
                         */
                        int swap_pageouts_ok;
                        struct vnode *vp = NULL;

                        swap_pageouts_ok = 0;
                        object = m->object;
                        if (object &&
                            (object->type != OBJT_SWAP) &&
                            (object->type != OBJT_DEFAULT)) {
                                swap_pageouts_ok = 1;
                        } else {
                                swap_pageouts_ok = !(defer_swap_pageouts ||
                                                     disable_swap_pageouts);
                                swap_pageouts_ok |= (!disable_swap_pageouts &&
                                                     defer_swap_pageouts &&
                                                     vm_page_count_min(0));
                        }
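                        /*
                         * Net effect of the above (illustrative):
                         * disable_swap_pageouts blocks swap-backed
                         * pageouts entirely; defer_swap_pageouts allows
                         * them only when free memory is critically low
                         * (vm_page_count_min(0)); with neither set they
                         * are always ok.  Non-swap-backed objects (e.g.
                         * vnodes) are never blocked by these knobs.
                         */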

                        /*
                         * We don't bother paging objects that are "dead".
                         * Those objects are in a "rundown" state.
                         */
                        if (!swap_pageouts_ok ||
                            (object == NULL) ||
                            (object->flags & OBJ_DEAD)) {
                                vm_page_and_queue_spin_lock(m);
                                if (m->queue - m->pc == PQ_INACTIVE) {
                                        TAILQ_REMOVE(
                                            &vm_page_queues[PQ_INACTIVE + q].pl,
                                            m, pageq);
                                        TAILQ_INSERT_TAIL(
                                            &vm_page_queues[PQ_INACTIVE + q].pl,
                                            m, pageq);
                                        ++vm_swapcache_inactive_heuristic;
                                }
                                vm_page_and_queue_spin_unlock(m);
                                vm_page_wakeup(m);
                                goto next;
                        }

                        /*
                         * (m) is still busied.
                         *
                         * The object is already known NOT to be dead.  It
                         * is possible for the vget() to block the whole
                         * pageout daemon, but the new low-memory handling
                         * code should prevent it.
                         *
                         * The previous code skipped locked vnodes and, worse,
                         * reordered pages in the queue.  This results in
                         * completely non-deterministic operation because,
                         * quite often, a vm_fault has initiated an I/O and
                         * is holding a locked vnode at just the point where
                         * the pageout daemon is woken up.
                         *
                         * We can't wait forever for the vnode lock, we might
                         * deadlock due to a vn_read() getting stuck in
                         * vm_wait while holding this vnode.  We skip the
                         * vnode if we can't get it in a reasonable amount
                         * of time.
                         *
                         * vpfailed is used to (try to) avoid the case where
                         * a large number of pages are associated with a
                         * locked vnode, which could cause the pageout daemon
                         * to stall for an excessive amount of time.
                         */
                        if (object->type == OBJT_VNODE) {
                                int flags;

                                vp = object->handle;
                                flags = LK_EXCLUSIVE;
                                if (vp == vpfailed)
                                        flags |= LK_NOWAIT;
                                else
                                        flags |= LK_TIMELOCK;
                                vm_page_hold(m);
                                vm_page_wakeup(m);

                                /*
                                 * We have unbusied (m) temporarily so we can
                                 * acquire the vp lock without deadlocking.
                                 * (m) is held to prevent destruction.
                                 */
                                if (vget(vp, flags) != 0) {
                                        vpfailed = vp;
                                        ++pageout_lock_miss;
                                        if (object->flags & OBJ_MIGHTBEDIRTY)
                                                ++*vnodes_skippedp;
                                        vm_page_unhold(m);
                                        goto next;
                                }

                                /*
                                 * The page might have been moved to another
                                 * queue during potential blocking in vget()
                                 * above.  The page might have been freed and
                                 * reused for another vnode.  The object might
                                 * have been reused for another vnode.
                                 */
                                if (m->queue - m->pc != PQ_INACTIVE ||
                                    m->object != object ||
                                    object->handle != vp) {
                                        if (object->flags & OBJ_MIGHTBEDIRTY)
                                                ++*vnodes_skippedp;
                                        vput(vp);
                                        vm_page_unhold(m);
                                        goto next;
                                }

                                /*
                                 * The page may have been busied while we
                                 * were blocked in vget() above; we don't
                                 * move the page back onto the end of the
                                 * queue so that statistics are more correct
                                 * if we don't.
                                 */
                                if (vm_page_busy_try(m, TRUE)) {
                                        vput(vp);
                                        vm_page_unhold(m);
                                        goto next;
                                }
                                vm_page_unhold(m);

                                /*
                                 * (m) is busied again
                                 *
                                 * We own the busy bit and remove our hold
                                 * bit.  If the page is still held it
                                 * might be undergoing I/O, so skip it.
                                 */
                                if (m->hold_count) {
                                        vm_page_and_queue_spin_lock(m);
                                        if (m->queue - m->pc == PQ_INACTIVE) {
                                                TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE + q].pl, m, pageq);
                                                TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE + q].pl, m, pageq);
                                                ++vm_swapcache_inactive_heuristic;
                                        }
                                        vm_page_and_queue_spin_unlock(m);
                                        if (object->flags & OBJ_MIGHTBEDIRTY)
                                                ++*vnodes_skippedp;
                                        vm_page_wakeup(m);
                                        vput(vp);
                                        goto next;
                                }
                                /* (m) is left busied as we fall through */
                        }

                        /*
                         * page is busy and not held here.
                         *
                         * If a page is dirty, then it is either being washed
                         * (but not yet cleaned) or it is still in the
                         * laundry.  If it is still in the laundry, then we
                         * start the cleaning operation.
                         *
                         * decrement inactive_shortage on success to account
                         * for the (future) cleaned page.  Otherwise we
                         * could wind up laundering or cleaning too many
                         * pages.
                         */
                        count = vm_pageout_clean(m);
                        delta += count;
                        maxlaunder -= count;

                        /*
                         * vm_pageout_clean() consumed the busy bit; the
                         * page is no longer accessible.
                         */
                        if (vp != NULL)
                                vput(vp);
                } else {
                        vm_page_wakeup(m);
                }

next:
                /*
                 * Systems with a ton of memory can wind up with huge
                 * deactivation counts.  Because the inactive scan is
                 * doing a lot of flushing, the combination can result
                 * in excessive paging even in situations where other
                 * unrelated threads free up sufficient VM.
                 *
                 * To deal with this we abort the nominal active->inactive
                 * scan before we hit the inactive target when free+cache
                 * levels have reached a reasonable target.
                 *
                 * When deciding to stop early we need to add some slop to
                 * the test and we need to return full completion to the caller
                 * to prevent the caller from thinking there is something
                 * wrong and issuing a low-memory+swap warning or pkill.
                 */
                vm_page_queues_spin_lock(PQ_INACTIVE + q);
                if (vm_paging_target() < -vm_max_launder) {
                        /*
                         * Stopping early, return full completion to caller.
                         */
                        if (delta < avail_shortage)
                                delta = avail_shortage;
                        break;
                }
        }

        /* page queue still spin-locked */
        TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE + q].pl, &marker, pageq);
        vm_page_queues_spin_unlock(PQ_INACTIVE + q);

        return (delta);
}

static int
vm_pageout_scan_active(int pass, int q,
                       int avail_shortage, int inactive_shortage,
                       int *recycle_countp)
{
        struct vm_page marker;
        vm_page_t m;
        int actcount;
        int delta = 0;
        int maxscan;

        /*
         * We want to move pages from the active queue to the inactive
         * queue to get the inactive queue to the inactive target.  If
         * we still have a page shortage from above we try to directly free
         * clean pages instead of moving them.
         *
         * If we do still have a shortage we keep track of the number of
         * pages we free or cache (recycle_count) as a measure of thrashing
         * between the active and inactive queues.
         *
         * If we were able to completely satisfy the free+cache targets
         * from the inactive pool we limit the number of pages we move
         * from the active pool to the inactive pool to 2x the pages we
         * had removed from the inactive pool (with a minimum of 1/5 the
         * inactive target).  If we were not able to completely satisfy
         * the free+cache targets we go for the whole target aggressively.
         *
         * NOTE: Both variables can end up negative.
         * NOTE: We are still in a critical section.
         */

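        /*
         * Illustrative numbers for the limiter described above (the
         * computation itself is done by our caller and is not shown in
         * this excerpt): if the inactive scan freed 300 pages and the
         * free+cache targets were met, inactive_shortage would be capped
         * near 600 (2 x 300), but never below 1/5 of the inactive target.
         */
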
1207         bzero(&marker, sizeof(marker));
1208         marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
1209         marker.queue = PQ_ACTIVE + q;
1210         marker.pc = q;
1211         marker.wire_count = 1;
1212
1213         vm_page_queues_spin_lock(PQ_ACTIVE + q);
1214         TAILQ_INSERT_HEAD(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1215         maxscan = vm_page_queues[PQ_ACTIVE + q].lcnt;
1216
1217         /*
1218          * Queue locked at top of loop to avoid stack marker issues.
1219          */
1220         while ((m = TAILQ_NEXT(&marker, pageq)) != NULL &&
1221                maxscan-- > 0 && (avail_shortage - delta > 0 ||
1222                                 inactive_shortage > 0))
1223         {
1224                 KKASSERT(m->queue - m->pc == PQ_ACTIVE);
1225                 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl,
1226                              &marker, pageq);
1227                 TAILQ_INSERT_AFTER(&vm_page_queues[PQ_ACTIVE + q].pl, m,
1228                                    &marker, pageq);
1229
1230                 /*
1231                  * Skip marker pages (atomic against other markers to avoid
1232                  * infinite hop-over scans).
1233                  */
1234                 if (m->flags & PG_MARKER)
1235                         continue;
1236
1237                 /*
1238                  * Try to busy the page.  Don't mess with pages which are
1239                  * already busy or reorder them in the queue.
1240                  */
1241                 if (vm_page_busy_try(m, TRUE))
1242                         continue;
1243
1244                 /*
1245                  * Remaining operations run with the page busy and neither
1246                  * the page or the queue will be spin-locked.
1247                  */
                vm_page_queues_spin_unlock(PQ_ACTIVE + q);
                KKASSERT(m->queue - m->pc == PQ_ACTIVE);
                lwkt_yield();

                /*
                 * Don't deactivate pages that are held, even if we can
                 * busy them.  (XXX why not?)
                 */
                if (m->hold_count != 0) {
                        vm_page_and_queue_spin_lock(m);
                        if (m->queue - m->pc == PQ_ACTIVE) {
                                TAILQ_REMOVE(
                                        &vm_page_queues[PQ_ACTIVE + q].pl,
                                        m, pageq);
                                TAILQ_INSERT_TAIL(
                                        &vm_page_queues[PQ_ACTIVE + q].pl,
                                        m, pageq);
                        }
                        vm_page_and_queue_spin_unlock(m);
                        vm_page_wakeup(m);
                        goto next;
                }

                /*
                 * The count for pagedaemon pages is done after checking the
                 * page for eligibility...
                 */
                mycpu->gd_cnt.v_pdpages++;

                /*
                 * Check to see "how much" the page has been used and clear
                 * the tracking access bits.  If the object has no references
                 * don't bother paying the expense.
                 */
                actcount = 0;
                if (m->object && m->object->ref_count != 0) {
                        if (m->flags & PG_REFERENCED)
                                ++actcount;
                        actcount += pmap_ts_referenced(m);
                        if (actcount) {
                                m->act_count += ACT_ADVANCE + actcount;
                                if (m->act_count > ACT_MAX)
                                        m->act_count = ACT_MAX;
                        }
                }
                vm_page_flag_clear(m, PG_REFERENCED);

                /*
                 * actcount is only valid if the object ref_count is non-zero.
                 * If the page does not have an object, actcount will be zero.
                 */
                if (actcount && m->object->ref_count != 0) {
                        vm_page_and_queue_spin_lock(m);
                        if (m->queue - m->pc == PQ_ACTIVE) {
                                TAILQ_REMOVE(
                                        &vm_page_queues[PQ_ACTIVE + q].pl,
                                        m, pageq);
                                TAILQ_INSERT_TAIL(
                                        &vm_page_queues[PQ_ACTIVE + q].pl,
                                        m, pageq);
                        }
                        vm_page_and_queue_spin_unlock(m);
                        vm_page_wakeup(m);
                } else {
                        /*
                         * m->object can be NULL here (it is tested below);
                         * fall back to the anonymous-memory decline in
                         * that case.
                         */
                        switch(m->object ? m->object->type : OBJT_DEFAULT) {
                        case OBJT_DEFAULT:
                        case OBJT_SWAP:
                                m->act_count -= min(m->act_count,
                                                    vm_anonmem_decline);
                                break;
                        default:
                                m->act_count -= min(m->act_count,
                                                    vm_filemem_decline);
                                break;
                        }
                        if (vm_pageout_algorithm ||
                            (m->object == NULL) ||
                            (m->object && (m->object->ref_count == 0)) ||
                            m->act_count < pass + 1
                        ) {
                                /*
                                 * Deactivate the page.  If we had a
                                 * shortage from our inactive scan, try to
                                 * free (cache) the page instead.
                                 *
                                 * Don't just blindly cache the page if
                                 * we do not have a shortage from the
                                 * inactive scan; that could lead to
                                 * gigabytes being moved.
                                 */
                                --inactive_shortage;
                                if (avail_shortage - delta > 0 ||
                                    (m->object && (m->object->ref_count == 0)))
                                {
                                        if (avail_shortage - delta > 0)
                                                ++*recycle_countp;
                                        vm_page_protect(m, VM_PROT_NONE);
                                        if (m->dirty == 0 &&
                                            (m->flags & PG_NEED_COMMIT) == 0 &&
                                            avail_shortage - delta > 0) {
                                                vm_page_cache(m);
                                        } else {
                                                vm_page_deactivate(m);
                                                vm_page_wakeup(m);
                                        }
                                } else {
                                        vm_page_deactivate(m);
                                        vm_page_wakeup(m);
                                }
                                ++delta;
                        } else {
                                vm_page_and_queue_spin_lock(m);
                                if (m->queue - m->pc == PQ_ACTIVE) {
                                        TAILQ_REMOVE(
                                            &vm_page_queues[PQ_ACTIVE + q].pl,
                                            m, pageq);
                                        TAILQ_INSERT_TAIL(
                                            &vm_page_queues[PQ_ACTIVE + q].pl,
                                            m, pageq);
                                }
                                vm_page_and_queue_spin_unlock(m);
                                vm_page_wakeup(m);
                        }
                }
next:
                vm_page_queues_spin_lock(PQ_ACTIVE + q);
        }

        /*
         * Clean out our local marker.
         *
         * Page queue still spin-locked.
         */
        TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
        vm_page_queues_spin_unlock(PQ_ACTIVE + q);

        return (delta);
}

/*
 * The number of actually free pages can drop down to v_free_reserved;
 * we try to build the free count back above v_free_min.  Note that
 * vm_paging_needed() also returns TRUE if v_free_count is not at
 * least v_free_min, so that is the minimum we must build the free
 * count to.
 *
 * We use a slightly higher target to improve hysteresis,
 * ((v_free_target + v_free_min) / 2).  Since v_free_target
 * is usually the same as v_cache_min this maintains about
 * half the pages in the free queue as are in the cache queue,
 * providing pretty good pipelining for pageout operation.
 *
 * The system operator can manipulate vm.v_cache_min and
 * vm.v_free_target to tune the pageout daemon.  Be sure
 * to keep vm.v_free_min < vm.v_free_target.
 *
 * Note that the original paging target is to get at least
 * (free_min + cache_min) into (free + cache).  The slightly
 * higher target will shift additional pages from cache to free
 * without affecting the original paging target in order to
 * maintain better hysteresis and not have the free count always
 * be dead-on v_free_min.
 *
 * NOTE: we are still in a critical section.
 *
 * Pages moved from PQ_CACHE to totally free are not counted in the
 * pages_freed counter.
 */
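/*
 * For example (illustrative values, matching the worked example in
 * vm_pageout_free_page_calc() below): with v_free_min = 1369 and
 * v_free_target = 6167, the loop below builds v_free_count up to
 * (1369 + 6167) / 2 = 3768 pages, comfortably above the minimum but
 * short of the full target.
 */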
static void
vm_pageout_scan_cache(int avail_shortage, int pass,
                      int vnodes_skipped, int recycle_count)
{
        static int lastkillticks;
        struct vm_pageout_scan_info info;
        vm_page_t m;

        while (vmstats.v_free_count <
               (vmstats.v_free_min + vmstats.v_free_target) / 2) {
                /*
                 * This steals some code from vm/vm_page.c
                 */
                static int cache_rover = 0;

                m = vm_page_list_find(PQ_CACHE,
                                      cache_rover & PQ_L2_MASK, FALSE);
                if (m == NULL)
                        break;
                /* page is returned removed from its queue and spinlocked */
                if (vm_page_busy_try(m, TRUE)) {
                        vm_page_deactivate_locked(m);
                        vm_page_spin_unlock(m);
                        continue;
                }
                vm_page_spin_unlock(m);
                pagedaemon_wakeup();
                lwkt_yield();

                /*
                 * The remaining operations run with the page busy and
                 * neither the page nor the queue spin-locked.
                 */
                if ((m->flags & (PG_UNMANAGED | PG_NEED_COMMIT)) ||
                    m->hold_count ||
                    m->wire_count) {
                        vm_page_deactivate(m);
                        vm_page_wakeup(m);
                        continue;
                }
                KKASSERT((m->flags & PG_MAPPED) == 0);
                KKASSERT(m->dirty == 0);
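                /*
                 * Advance the rover by a prime stride; since PQ_L2_SIZE
                 * is a power of 2 the stride is relatively prime to it,
                 * so the rover eventually visits every PQ_CACHE queue.
                 */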
                cache_rover += PQ_PRIME2;
                vm_pageout_page_free(m);
                mycpu->gd_cnt.v_dfree++;
        }

#if !defined(NO_SWAPPING)
        /*
         * Idle process swapout -- run once per second.
         */
        if (vm_swap_idle_enabled) {
                static time_t lsec;
                if (time_uptime != lsec) {
                        vm_pageout_req_swapout |= VM_SWAP_IDLE;
                        vm_req_vmdaemon();
                        lsec = time_uptime;
                }
        }
#endif

        /*
         * If we didn't get enough free pages and we skipped a vnode in a
         * writeable object, wake up the sync daemon.  Also kick swapout
         * if we are still short of free pages.
         */
        if (vm_paging_target() > 0) {
                if (vnodes_skipped && vm_page_count_min(0))
                        speedup_syncer(NULL);
#if !defined(NO_SWAPPING)
                if (vm_swap_enabled && vm_page_count_target()) {
                        vm_req_vmdaemon();
                        vm_pageout_req_swapout |= VM_SWAP_NORMAL;
                }
#endif
        }

        /*
         * Handle catastrophic conditions.  Under good conditions we should
         * be at the target, well beyond our minimum.  If we could not even
         * reach our minimum the system is under heavy stress.  But just being
         * under heavy stress does not trigger process killing.
         *
         * We consider ourselves to have run out of memory if the swap pager
         * is full and avail_shortage is still positive.  The secondary check
         * ensures that we do not kill processes if the instantaneous
         * availability is good, even if the pageout daemon pass says it
         * couldn't get to the target.
         */
        if (swap_pager_almost_full &&
            pass > 0 &&
            (vm_page_count_min(recycle_count) || avail_shortage > 0)) {
                kprintf("Warning: system low on memory+swap "
                        "shortage %d for %d ticks!\n",
                        avail_shortage, ticks - swap_fail_ticks);
        }
        if (swap_pager_full &&
            pass > 1 &&
            avail_shortage > 0 &&
            vm_paging_target() > 0 &&
            (unsigned int)(ticks - lastkillticks) >= hz) {
                /*
                 * Kill something, maximum rate once per second to give
                 * the process time to free up sufficient memory.
                 */
                lastkillticks = ticks;
                info.bigproc = NULL;
                info.bigsize = 0;
                allproc_scan(vm_pageout_scan_callback, &info);
                if (info.bigproc != NULL) {
                        info.bigproc->p_nice = PRIO_MIN;
                        info.bigproc->p_usched->resetpriority(
                                FIRST_LWP_IN_PROC(info.bigproc));
                        killproc(info.bigproc, "out of swap space");
                        wakeup(&vmstats.v_free_count);
                        PRELE(info.bigproc);
                }
        }
}

static int
vm_pageout_scan_callback(struct proc *p, void *data)
{
        struct vm_pageout_scan_info *info = data;
        vm_offset_t size;

        /*
         * Never kill system processes or init.  If we have configured swap
         * then try to avoid killing low-numbered pids.
         */
        if ((p->p_flags & P_SYSTEM) || (p->p_pid == 1) ||
            ((p->p_pid < 48) && (vm_swap_size != 0))) {
                return (0);
        }

        lwkt_gettoken(&p->p_token);

        /*
         * if the process is in a non-running type state,
         * don't touch it.
         */
        if (p->p_stat != SACTIVE && p->p_stat != SSTOP && p->p_stat != SCORE) {
                lwkt_reltoken(&p->p_token);
                return (0);
        }

        /*
         * Get the approximate process size.  Note that anonymous pages
         * with backing swap will be counted twice, but there should not
         * be too many such pages due to the stress the VM system is
         * under at this point.
         */
        size = vmspace_anonymous_count(p->p_vmspace) +
                vmspace_swap_count(p->p_vmspace);

        /*
         * If this process is bigger than the biggest one seen so far,
         * remember it.
         */
        if (info->bigsize < size) {
                if (info->bigproc)
                        PRELE(info->bigproc);
                PHOLD(p);
                info->bigproc = p;
                info->bigsize = size;
        }
        lwkt_reltoken(&p->p_token);
        lwkt_yield();

        return(0);
}

/*
 * This routine tries to maintain the pseudo-LRU active queue so that
 * some statistics accumulation still occurs during long periods in
 * which there is no paging.  This helps the situation where paging
 * just starts to occur.
 */
static void
vm_pageout_page_stats(int q)
{
        static int fullintervalcount = 0;
        struct vm_page marker;
        vm_page_t m;
        int pcount, tpcount;            /* Number of pages to check */
        int page_shortage;

        page_shortage = (vmstats.v_inactive_target + vmstats.v_cache_max +
                         vmstats.v_free_min) -
                        (vmstats.v_free_count + vmstats.v_inactive_count +
                         vmstats.v_cache_count);
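        /*
         * A positive page_shortage here means free+inactive+cache has
         * dropped below the combined targets, i.e. the system is close
         * enough to paging that the extra stats gathering is worth it.
         */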

        if (page_shortage <= 0)
                return;

        pcount = vm_page_queues[PQ_ACTIVE + q].lcnt;
        fullintervalcount += vm_pageout_stats_interval;
        if (fullintervalcount < vm_pageout_full_stats_interval) {
                tpcount = (vm_pageout_stats_max * pcount) /
                          vmstats.v_page_count + 1;
                if (pcount > tpcount)
                        pcount = tpcount;
        } else {
                fullintervalcount = 0;
        }
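        /*
         * E.g. (illustrative): with vm_pageout_stats_max = 6167,
         * v_page_count = 262144, and a queue holding pcount = 16384
         * pages, the partial scan above checks at most
         * (6167 * 16384) / 262144 + 1 = 386 pages this interval.
         */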

        bzero(&marker, sizeof(marker));
        marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
        marker.queue = PQ_ACTIVE + q;
        marker.pc = q;
        marker.wire_count = 1;

        vm_page_queues_spin_lock(PQ_ACTIVE + q);
        TAILQ_INSERT_HEAD(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);

        /*
         * Queue locked at top of loop to avoid stack marker issues.
         */
        while ((m = TAILQ_NEXT(&marker, pageq)) != NULL &&
               pcount-- > 0)
        {
                int actcount;

                KKASSERT(m->queue - m->pc == PQ_ACTIVE);
                TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
                TAILQ_INSERT_AFTER(&vm_page_queues[PQ_ACTIVE + q].pl, m,
                                   &marker, pageq);

                /*
                 * Skip marker pages (atomic against other markers to avoid
                 * infinite hop-over scans).
                 */
                if (m->flags & PG_MARKER)
                        continue;

                /*
                 * Ignore pages we can't busy
                 */
                if (vm_page_busy_try(m, TRUE))
                        continue;

                /*
                 * The remaining operations run with the page busy and
                 * neither the page nor the queue spin-locked.
                 */
                vm_page_queues_spin_unlock(PQ_ACTIVE + q);
                KKASSERT(m->queue - m->pc == PQ_ACTIVE);

                /*
                 * We now have a safely busied page, the page and queue
                 * spinlocks have been released.
                 *
                 * Ignore held pages
                 */
                if (m->hold_count) {
                        vm_page_wakeup(m);
                        goto next;
                }

                /*
                 * Calculate activity
                 */
                actcount = 0;
                if (m->flags & PG_REFERENCED) {
                        vm_page_flag_clear(m, PG_REFERENCED);
                        actcount += 1;
                }
                actcount += pmap_ts_referenced(m);

                /*
                 * Update act_count and move page to end of queue.
                 */
                if (actcount) {
                        m->act_count += ACT_ADVANCE + actcount;
                        if (m->act_count > ACT_MAX)
                                m->act_count = ACT_MAX;
                        vm_page_and_queue_spin_lock(m);
                        if (m->queue - m->pc == PQ_ACTIVE) {
                                TAILQ_REMOVE(
                                        &vm_page_queues[PQ_ACTIVE + q].pl,
                                        m, pageq);
                                TAILQ_INSERT_TAIL(
                                        &vm_page_queues[PQ_ACTIVE + q].pl,
                                        m, pageq);
                        }
                        vm_page_and_queue_spin_unlock(m);
                        vm_page_wakeup(m);
                        goto next;
                }

                if (m->act_count == 0) {
                        /*
                         * We turn off page access so that we have more
                         * accurate RSS stats.  We don't do this in the
                         * normal page deactivation when the system is
                         * loaded VM wise, because the cost of the large
                         * number of page protect operations would be
                         * higher than the value of doing the operation.
                         *
                         * We use the marker to save our place so we
                         * can release the spin lock.  Both (m) and
                         * (next) will be invalid.
                         */
                        vm_page_protect(m, VM_PROT_NONE);
                        vm_page_deactivate(m);
                } else {
                        m->act_count -= min(m->act_count, ACT_DECLINE);
                        vm_page_and_queue_spin_lock(m);
                        if (m->queue - m->pc == PQ_ACTIVE) {
                                TAILQ_REMOVE(
                                        &vm_page_queues[PQ_ACTIVE + q].pl,
                                        m, pageq);
                                TAILQ_INSERT_TAIL(
                                        &vm_page_queues[PQ_ACTIVE + q].pl,
                                        m, pageq);
                        }
                        vm_page_and_queue_spin_unlock(m);
                }
                vm_page_wakeup(m);
next:
                vm_page_queues_spin_lock(PQ_ACTIVE + q);
        }

        /*
         * Remove our local marker
         *
         * Page queue still spin-locked.
         */
        TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
        vm_page_queues_spin_unlock(PQ_ACTIVE + q);
}

static int
vm_pageout_free_page_calc(vm_size_t count)
{
        if (count < vmstats.v_page_count)
                return 0;
        /*
         * free_reserved needs to include enough for the largest swap pager
         * structures plus enough for any pv_entry structs when paging.
         *
         * v_free_min           normal allocations
         * v_free_reserved      system allocations
         * v_pageout_free_min   allocations by pageout daemon
         * v_interrupt_free_min low level allocations (e.g. swap structures)
         */
        if (vmstats.v_page_count > 1024)
                vmstats.v_free_min = 64 + (vmstats.v_page_count - 1024) / 200;
        else
                vmstats.v_free_min = 64;
        vmstats.v_free_reserved = vmstats.v_free_min * 4 / 8 + 7;
        vmstats.v_free_severe = vmstats.v_free_min * 4 / 8 + 0;
        vmstats.v_pageout_free_min = vmstats.v_free_min * 2 / 8 + 7;
        vmstats.v_interrupt_free_min = vmstats.v_free_min * 1 / 8 + 7;
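        /*
         * Worked example (illustrative) with v_page_count = 262144
         * (1GB of 4K pages): v_free_min = 64 + 261120 / 200 = 1369
         * pages (~5.3MB), v_free_reserved = 691, v_free_severe = 684,
         * v_pageout_free_min = 349, v_interrupt_free_min = 178.
         */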

        return 1;
}


/*
 * vm_pageout is the high level pageout daemon.
 *
 * No requirements.
 */
static void
vm_pageout_thread(void)
{
        int pass;
        int q;
        int q1iterator = 0;
        int q2iterator = 0;

        /*
         * Initialize some paging parameters.
         */
        curthread->td_flags |= TDF_SYSTHREAD;

        vm_pageout_free_page_calc(vmstats.v_page_count);

        /*
         * v_free_target and v_cache_min control pageout hysteresis.  Note
         * that these are more a measure of the VM cache queue hysteresis
         * than of the VM free queue.  Specifically, v_free_target is the
         * high water mark (free+cache pages).
         *
         * v_free_reserved + v_cache_min (mostly means v_cache_min) is the
         * low water mark, while v_free_min is the stop.  v_cache_min must
         * be big enough to handle memory needs while the pageout daemon
         * is signalled and run to free more pages.
         */
        if (vmstats.v_free_count > 6144)
                vmstats.v_free_target = 4 * vmstats.v_free_min + vmstats.v_free_reserved;
        else
                vmstats.v_free_target = 2 * vmstats.v_free_min + vmstats.v_free_reserved;
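        /*
         * Continuing the example in vm_pageout_free_page_calc()
         * (v_free_min = 1369, v_free_reserved = 691, plenty of
         * memory): v_free_target becomes 4 * 1369 + 691 = 6167 pages,
         * or roughly 24MB with 4K pages.
         */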

        /*
         * NOTE: With the new buffer cache b_act_count we want the default
         *       inactive target to be a percentage of available memory.
         *
         *       The inactive target essentially determines the minimum
         *       number of 'temporary' pages capable of caching one-time-use
         *       files when the VM system is otherwise full of pages
         *       belonging to multi-time-use files or active program data.
         *
         * NOTE: The inactive target is aggressively pursued only if the
         *       inactive queue becomes too small.  If the inactive queue
         *       is large enough to satisfy page movement to free+cache
         *       then it is repopulated more slowly from the active queue.
         *       This allows a general inactive_target default to be set.
         *
         *       There is an issue here for processes which sit mostly idle
         *       'overnight', such as sshd, tcsh, and X.  Any movement from
         *       the active queue will eventually cause such pages to
         *       recycle, in turn causing a lot of paging in the morning.
         *       To reduce the incidence of this, pages cycled out of the
         *       buffer cache are moved directly to the inactive queue if
         *       they were only used once or twice.
         *
         *       The vfs.vm_cycle_point sysctl can be used to adjust this.
         *       Increasing the value (up to 64) increases the number of
         *       buffer recyclements which go directly to the inactive queue.
         */
        if (vmstats.v_free_count > 2048) {
                vmstats.v_cache_min = vmstats.v_free_target;
                vmstats.v_cache_max = 2 * vmstats.v_cache_min;
        } else {
                vmstats.v_cache_min = 0;
                vmstats.v_cache_max = 0;
        }
        vmstats.v_inactive_target = vmstats.v_free_count / 4;

        /* XXX does not really belong here */
        if (vm_page_max_wired == 0)
                vm_page_max_wired = vmstats.v_free_count / 3;

        if (vm_pageout_stats_max == 0)
                vm_pageout_stats_max = vmstats.v_free_target;

        /*
         * Set interval in seconds for stats scan.
         */
        if (vm_pageout_stats_interval == 0)
                vm_pageout_stats_interval = 5;
        if (vm_pageout_full_stats_interval == 0)
                vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;


        /*
         * Set maximum free per pass
         */
        if (vm_pageout_stats_free_max == 0)
                vm_pageout_stats_free_max = 5;

        swap_pager_swap_init();
        pass = 0;

        /*
         * The pageout daemon is never done, so loop forever.
         */
        while (TRUE) {
                int error;
                int avail_shortage;
                int inactive_shortage;
                int vnodes_skipped = 0;
                int recycle_count = 0;
                int tmp;

                /*
                 * Wait for an action request.  If we timeout check to
                 * see if paging is needed (in case the normal wakeup
                 * code raced us).
                 */
                if (vm_pages_needed == 0) {
                        error = tsleep(&vm_pages_needed,
                                       0, "psleep",
                                       vm_pageout_stats_interval * hz);
                        if (error &&
                            vm_paging_needed() == 0 &&
                            vm_pages_needed == 0) {
                                for (q = 0; q < PQ_L2_SIZE; ++q)
                                        vm_pageout_page_stats(q);
                                continue;
                        }
                        vm_pages_needed = 1;
                }

                mycpu->gd_cnt.v_pdwakeups++;

                /*
                 * Do whatever cleanup that the pmap code can.
                 */
                pmap_collect();

                /*
                 * Scan for pageout.  Try to avoid thrashing the system
                 * with activity.
                 *
                 * Calculate our target for the number of free+cache pages we
                 * want to get to.  This is higher than the number that causes
                 * allocations to stall (severe) in order to provide hysteresis,
                 * and if we don't make it all the way but get to the minimum
                 * we're happy.  Goose it a bit if there are multiple requests
                 * for memory.
                 *
                 * Don't reduce avail_shortage inside the loop or the
                 * PQAVERAGE() calculation will break.
                 */
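                /*
                 * PQAVERAGE() (defined earlier in this file) apportions
                 * the shortage across the PQ_L2_SIZE queues so that each
                 * per-queue scan below gets an even share of the work.
                 */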
                avail_shortage = vm_paging_target() + vm_pageout_deficit;
                vm_pageout_deficit = 0;

                if (avail_shortage > 0) {
                        int delta = 0;

                        for (q = 0; q < PQ_L2_SIZE; ++q) {
                                delta += vm_pageout_scan_inactive(
                                            pass,
                                            (q + q1iterator) & PQ_L2_MASK,
                                            PQAVERAGE(avail_shortage),
                                            &vnodes_skipped);
                                if (avail_shortage - delta <= 0)
                                        break;
                        }
                        avail_shortage -= delta;
                        q1iterator = q + 1;
                }

                /*
                 * Figure out how many active pages we must deactivate.  If
                 * we were able to reach our target with just the inactive
                 * scan above we limit the number of active pages we
                 * deactivate to reduce unnecessary work.
                 */
                inactive_shortage = vmstats.v_inactive_target -
                                    vmstats.v_inactive_count;

                /*
                 * If we were unable to free sufficient inactive pages to
                 * satisfy the free/cache queue requirements then simply
                 * reaching the inactive target may not be good enough.
                 * Try to deactivate pages in excess of the target based
                 * on the shortfall.
                 *
                 * However to prevent thrashing the VM system do not
                 * deactivate more than an additional 1/10 the inactive
                 * target's worth of active pages.
                 */
                if (avail_shortage > 0) {
                        tmp = avail_shortage * 2;
                        if (tmp > vmstats.v_inactive_target / 10)
                                tmp = vmstats.v_inactive_target / 10;
                        inactive_shortage += tmp;
                }
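                /*
                 * Illustrative: with v_inactive_target = 65536 and an
                 * avail_shortage of 5000, tmp = 10000 is clamped to
                 * 65536 / 10 = 6553 extra pages of deactivation.
                 */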

                /*
                 * Only trigger on inactive shortage.  Triggering on
                 * avail_shortage can starve the active queue with
                 * unnecessary active->inactive transitions and destroy
                 * performance.
                 */
                if (/*avail_shortage > 0 ||*/ inactive_shortage > 0) {
                        int delta = 0;

                        for (q = 0; q < PQ_L2_SIZE; ++q) {
                                delta += vm_pageout_scan_active(
                                                pass,
                                                (q + q2iterator) & PQ_L2_MASK,
                                                PQAVERAGE(avail_shortage),
                                                PQAVERAGE(inactive_shortage),
                                                &recycle_count);
                                if (inactive_shortage - delta <= 0 &&
                                    avail_shortage - delta <= 0) {
                                        break;
                                }
                        }
                        inactive_shortage -= delta;
                        avail_shortage -= delta;
                        q2iterator = q + 1;
                }

                /*
                 * Finally free enough cache pages to meet our free page
                 * requirement and take more drastic measures if we are
                 * still in trouble.
                 */
                vm_pageout_scan_cache(avail_shortage, pass,
                                      vnodes_skipped, recycle_count);

                /*
                 * Wait for more work.
                 */
                if (avail_shortage > 0) {
                        ++pass;
                        if (pass < 10 && vm_pages_needed > 1) {
                                /*
                                 * Normal operation, additional processes
                                 * have already kicked us.  Retry immediately
                                 * unless swap space is completely full in
                                 * which case delay a bit.
                                 */
                                if (swap_pager_full) {
                                        tsleep(&vm_pages_needed, 0, "pdelay",
                                                hz / 5);
                                } /* else immediate retry */
                        } else if (pass < 10) {
                                /*
                                 * Normal operation, fewer processes.  Delay
                                 * a bit but allow wakeups.
                                 */
                                vm_pages_needed = 0;
                                tsleep(&vm_pages_needed, 0, "pdelay", hz / 10);
                                vm_pages_needed = 1;
                        } else if (swap_pager_full == 0) {
                                /*
                                 * We've taken too many passes, forced delay.
                                 */
                                tsleep(&vm_pages_needed, 0, "pdelay", hz / 10);
                        } else {
                                /*
                                 * Running out of memory, catastrophic
                                 * back-off to one-second intervals.
                                 */
                                tsleep(&vm_pages_needed, 0, "pdelay", hz);
                        }
                } else if (vm_pages_needed) {
                        /*
                         * Interlocked wakeup of waiters (non-optional).
                         *
                         * Similar to vm_page_free_wakeup() in vm_page.c,
                         * wake waiters on v_free_count once we are no
                         * longer below the minimum (plus hysteresis) or
                         * the target.
                         */
                        pass = 0;
                        if (!vm_page_count_min(vm_page_free_hysteresis) ||
                            !vm_page_count_target()) {
                                vm_pages_needed = 0;
                                wakeup(&vmstats.v_free_count);
                        }
                } else {
                        pass = 0;
                }
        }
}

static struct kproc_desc page_kp = {
        "pagedaemon",
        vm_pageout_thread,
        &pagethread
};
SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp);


/*
 * Called after allocating a page out of the cache or free queue
 * to possibly wake the pagedaemon up to replenish our supply.
 *
 * We try to generate some hysteresis by waking the pagedaemon up
 * when our free+cache pages go below the free_min+cache_min level.
 * The pagedaemon tries to get the count back up to at least the
 * minimum, and through to the target level if possible.
 *
 * If the pagedaemon is already active bump vm_pages_needed as a hint
 * that there are even more requests pending.
 *
 * SMP races ok?
 * No requirements.
 */
void
pagedaemon_wakeup(void)
{
        if (vm_paging_needed() && curthread != pagethread) {
                if (vm_pages_needed == 0) {
                        vm_pages_needed = 1;    /* SMP race ok */
                        wakeup(&vm_pages_needed);
                } else if (vm_page_count_min(0)) {
                        ++vm_pages_needed;      /* SMP race ok */
                }
        }
}

#if !defined(NO_SWAPPING)

/*
 * SMP races ok?
 * No requirements.
 */
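/*
 * Rate-limited to roughly one wakeup per second; the (ticks < lastrun)
 * test restarts the interval if the tick counter wraps.
 */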
static void
vm_req_vmdaemon(void)
{
        static int lastrun = 0;

        if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
                wakeup(&vm_daemon_needed);
                lastrun = ticks;
        }
}

static int vm_daemon_callback(struct proc *p, void *data __unused);

/*
 * No requirements.
 */
static void
vm_daemon(void)
{
        /*
         * XXX vm_daemon_needed specific token?
         */
        while (TRUE) {
                tsleep(&vm_daemon_needed, 0, "psleep", 0);
                if (vm_pageout_req_swapout) {
                        swapout_procs(vm_pageout_req_swapout);
                        vm_pageout_req_swapout = 0;
                }
                /*
                 * Scan the processes: deactivate pages for any process
                 * exceeding its RSS rlimit or which has been swapped out.
                 */
                allproc_scan(vm_daemon_callback, NULL);
        }
}

static int
vm_daemon_callback(struct proc *p, void *data __unused)
{
        struct vmspace *vm;
        vm_pindex_t limit, size;

        /*
         * if this is a system process or if we have already
         * looked at this process, skip it.
         */
        lwkt_gettoken(&p->p_token);

        if (p->p_flags & (P_SYSTEM | P_WEXIT)) {
                lwkt_reltoken(&p->p_token);
                return (0);
        }

        /*
         * if the process is in a non-running type state,
         * don't touch it.
         */
        if (p->p_stat != SACTIVE && p->p_stat != SSTOP && p->p_stat != SCORE) {
                lwkt_reltoken(&p->p_token);
                return (0);
        }

        /*
         * get a limit
         */
        limit = OFF_TO_IDX(qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
                                p->p_rlimit[RLIMIT_RSS].rlim_max));
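        /*
         * E.g. an RSS rlimit of 64MB yields a limit of 16384 4K pages
         * (OFF_TO_IDX() converts a byte count to a page count).
         */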

        /*
         * let processes that are swapped out really be
         * swapped out.  Set the limit to nothing to get as
         * many pages out to swap as possible.
         */
        if (p->p_flags & P_SWAPPEDOUT)
                limit = 0;

        vm = p->p_vmspace;
        vmspace_hold(vm);
        size = vmspace_resident_count(vm);
        if (limit >= 0 && size >= limit) {
                vm_pageout_map_deactivate_pages(&vm->vm_map, limit);
        }
        vmspace_drop(vm);

        lwkt_reltoken(&p->p_token);

        return (0);
}

#endif