1 /*
2  * Copyright (c) 1991 Regents of the University of California.
3  * All rights reserved.
4  * Copyright (c) 1994 John S. Dyson
5  * All rights reserved.
6  * Copyright (c) 1994 David Greenman
7  * All rights reserved.
8  *
9  * This code is derived from software contributed to Berkeley by
10  * The Mach Operating System project at Carnegie-Mellon University.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *      from: @(#)vm_pageout.c  7.4 (Berkeley) 5/7/91
37  *
38  *
39  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40  * All rights reserved.
41  *
42  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
43  *
44  * Permission to use, copy, modify and distribute this software and
45  * its documentation is hereby granted, provided that both the copyright
46  * notice and this permission notice appear in all copies of the
47  * software, derivative works or modified versions, and any portions
48  * thereof, and that both notices appear in supporting documentation.
49  *
50  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53  *
54  * Carnegie Mellon requests users of this software to return to
55  *
56  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
57  *  School of Computer Science
58  *  Carnegie Mellon University
59  *  Pittsburgh PA 15213-3890
60  *
61  * any improvements or extensions that they make and grant Carnegie the
62  * rights to redistribute these changes.
63  *
64  * $FreeBSD: src/sys/vm/vm_pageout.c,v 1.151.2.15 2002/12/29 18:21:04 dillon Exp $
65  */
66
67 /*
68  *      The proverbial page-out daemon.
69  */
70
71 #include "opt_vm.h"
72 #include <sys/param.h>
73 #include <sys/systm.h>
74 #include <sys/kernel.h>
75 #include <sys/proc.h>
76 #include <sys/kthread.h>
77 #include <sys/resourcevar.h>
78 #include <sys/signalvar.h>
79 #include <sys/vnode.h>
80 #include <sys/vmmeter.h>
81 #include <sys/sysctl.h>
82
83 #include <vm/vm.h>
84 #include <vm/vm_param.h>
85 #include <sys/lock.h>
86 #include <vm/vm_object.h>
87 #include <vm/vm_page.h>
88 #include <vm/vm_map.h>
89 #include <vm/vm_pageout.h>
90 #include <vm/vm_pager.h>
91 #include <vm/swap_pager.h>
92 #include <vm/vm_extern.h>
93
94 #include <sys/thread2.h>
95 #include <sys/spinlock2.h>
96 #include <vm/vm_page2.h>
97
98 /*
99  * System initialization
100  */
101
102 /* the kernel process "vm_pageout"*/
103 static int vm_pageout_clean (vm_page_t);
104 static int vm_pageout_free_page_calc (vm_size_t count);
105 struct thread *pagethread;
106
107 #if !defined(NO_SWAPPING)
108 /* the kernel process "vm_daemon"*/
109 static void vm_daemon (void);
110 static struct   thread *vmthread;
111
112 static struct kproc_desc vm_kp = {
113         "vmdaemon",
114         vm_daemon,
115         &vmthread
116 };
117 SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp);
118 #endif
119
120 int vm_pages_needed=0;          /* Event on which pageout daemon sleeps */
121 int vm_pageout_deficit=0;       /* Estimated number of pages deficit */
122 int vm_pageout_pages_needed=0;  /* pageout daemon needs pages */
123 int vm_page_free_hysteresis = 16;
124
125 #if !defined(NO_SWAPPING)
126 static int vm_pageout_req_swapout;      /* XXX */
127 static int vm_daemon_needed;
128 #endif
129 static int vm_max_launder = 4096;
130 static int vm_pageout_stats_max=0, vm_pageout_stats_interval = 0;
131 static int vm_pageout_full_stats_interval = 0;
132 static int vm_pageout_stats_free_max=0, vm_pageout_algorithm=0;
133 static int defer_swap_pageouts=0;
134 static int disable_swap_pageouts=0;
135 static u_int vm_anonmem_decline = ACT_DECLINE;
136 static u_int vm_filemem_decline = ACT_DECLINE * 2;
137
138 #if defined(NO_SWAPPING)
139 static int vm_swap_enabled=0;
140 static int vm_swap_idle_enabled=0;
141 #else
142 static int vm_swap_enabled=1;
143 static int vm_swap_idle_enabled=0;
144 #endif
145
146 SYSCTL_UINT(_vm, VM_PAGEOUT_ALGORITHM, anonmem_decline,
147         CTLFLAG_RW, &vm_anonmem_decline, 0, "active->inactive anon memory");
148
149 SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, filemem_decline,
150         CTLFLAG_RW, &vm_filemem_decline, 0, "active->inactive file cache");
151
152 SYSCTL_INT(_vm, OID_AUTO, page_free_hysteresis,
153         CTLFLAG_RW, &vm_page_free_hysteresis, 0,
154         "Free more pages than the minimum required");
155
156 SYSCTL_INT(_vm, OID_AUTO, max_launder,
157         CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");
158
159 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
160         CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");
161
162 SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
163         CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");
164
165 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
166         CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");
167
168 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
169         CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "Not implemented");
170
171 #if defined(NO_SWAPPING)
172 SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
173         CTLFLAG_RD, &vm_swap_enabled, 0, "");
174 SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
175         CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
176 #else
177 SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
178         CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
179 SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
180         CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
181 #endif
182
183 SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
184         CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");
185
186 SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
187         CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");
188
189 static int pageout_lock_miss;
190 SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
191         CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");
192
193 int vm_page_max_wired;          /* XXX max # of wired pages system-wide */
194
195 #if !defined(NO_SWAPPING)
196 typedef void freeer_fcn_t (vm_map_t, vm_object_t, vm_pindex_t, int);
197 static void vm_pageout_map_deactivate_pages (vm_map_t, vm_pindex_t);
198 static freeer_fcn_t vm_pageout_object_deactivate_pages;
199 static void vm_req_vmdaemon (void);
200 #endif
201 static void vm_pageout_page_stats(int q);
202
203 /*
204  * Calculate approximately how many pages on each queue to try to
205  * clean.  An exact calculation creates an edge condition when the
206  * queues are unbalanced so add significant slop.  The queue scans
207  * will stop early when targets are reached and will start where they
208  * left off on the next pass.
209  *
210  * We need to be generous here because there are all sorts of loading
211  * conditions that can cause edge cases if we try to average over all queues.
212  * In particular, storage subsystems have become so fast that paging
213  * activity can become quite frantic.  Eventually we will probably need
214  * two paging threads, one for dirty pages and one for clean, to deal
215  * with the bandwidth requirements.
216  *
217  * So what we do is calculate a value that can be satisfied nominally by
218  * only having to scan half the queues.
219  */
220 static __inline int
221 PQAVERAGE(int n)
222 {
223         int avg;
224
225         if (n >= 0) {
226                 avg = ((n + (PQ_L2_SIZE - 1)) / (PQ_L2_SIZE / 2) + 1);
227         } else {
228                 avg = ((n - (PQ_L2_SIZE - 1)) / (PQ_L2_SIZE / 2) - 1);
229         }
230         return avg;
231 }
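/*
 * Illustrative note, not part of the original source: assuming for the
 * arithmetic that PQ_L2_SIZE is 256, a shortage of n = 1000 pages gives
 * PQAVERAGE(1000) = (1000 + 255) / 128 + 1 = 10, so each of the 256 queues
 * is asked for ~10 pages (~2560 total, about 2.5x the shortage).  Hitting
 * the per-queue target on roughly half the queues is then nominally enough
 * to cover the shortage, which is the slop described above.
 */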
232
233 /*
234  * vm_pageout_clean:
235  *
236  * Clean the page and remove it from the laundry.  The page must not be
237  * busy on-call.
238  * 
239  * We set the busy bit to cause potential page faults on this page to
240  * block.  Note the careful timing, however: the busy bit isn't set until
241  * late, and we cannot do anything that will mess with the page.
242  */
243 static int
244 vm_pageout_clean(vm_page_t m)
245 {
246         vm_object_t object;
247         vm_page_t mc[BLIST_MAX_ALLOC];
248         int error;
249         int ib, is, page_base;
250         vm_pindex_t pindex = m->pindex;
251
252         object = m->object;
253
254         /*
255          * It doesn't cost us anything to page out OBJT_DEFAULT or OBJT_SWAP
256          * with the new swapper, but we could have serious problems paging
257          * out other object types if there is insufficient memory.  
258          *
259          * Unfortunately, checking free memory here is far too late, so the
260          * check has been moved up a procedural level.
261          */
262
263         /*
264          * Don't mess with the page if it's busy, held, or special
265          *
266          * XXX do we really need to check hold_count here?  hold_count
267          * isn't supposed to mess with vm_page ops except prevent the
268          * page from being reused.
269          */
270         if (m->hold_count != 0 || (m->flags & PG_UNMANAGED)) {
271                 vm_page_wakeup(m);
272                 return 0;
273         }
274
275         /*
276          * Place page in cluster.  Align cluster for optimal swap space
277          * allocation (whether it is swap or not).  This is typically ~16-32
278          * pages, which also tends to align the cluster to multiples of the
279          * filesystem block size if backed by a filesystem.
280          */
281         page_base = pindex % BLIST_MAX_ALLOC;
282         mc[page_base] = m;
283         ib = page_base - 1;
284         is = page_base + 1;
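        /*
         * Illustrative note, not part of the original source: if
         * BLIST_MAX_ALLOC were 32 and m->pindex were 70, then page_base
         * would be 70 % 32 = 6, m would land in mc[6], and the candidate
         * window would span object indices 64..95 (pindex - page_base + 0
         * through + 31), i.e. the cluster is aligned to a BLIST_MAX_ALLOC
         * boundary within the object.  ib and is then walk backward and
         * forward from mc[5] and mc[7] respectively.
         */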
285
286         /*
287          * Scan object for clusterable pages.
288          *
289          * We can cluster ONLY if: ->> the page is NOT
290          * clean, wired, busy, held, or mapped into a
291          * buffer, and one of the following:
292          * 1) The page is inactive, or a seldom used
293          *    active page.
294          * -or-
295          * 2) we force the issue.
296          *
297          * During heavy mmap/modification loads the pageout
298          * daemon can really fragment the underlying file
299          * due to flushing pages out of order and not trying to
300          * align the clusters (which leaves sporadic out-of-order
301          * holes).  To solve this problem we do the reverse scan
302          * first and attempt to align our cluster, then do a 
303          * forward scan if room remains.
304          */
305
306         vm_object_hold(object);
307         while (ib >= 0) {
308                 vm_page_t p;
309
310                 p = vm_page_lookup_busy_try(object, pindex - page_base + ib,
311                                             TRUE, &error);
312                 if (error || p == NULL)
313                         break;
314                 if ((p->queue - p->pc) == PQ_CACHE ||
315                     (p->flags & PG_UNMANAGED)) {
316                         vm_page_wakeup(p);
317                         break;
318                 }
319                 vm_page_test_dirty(p);
320                 if (((p->dirty & p->valid) == 0 &&
321                      (p->flags & PG_NEED_COMMIT) == 0) ||
322                     p->queue - p->pc != PQ_INACTIVE ||
323                     p->wire_count != 0 ||       /* may be held by buf cache */
324                     p->hold_count != 0) {       /* may be undergoing I/O */
325                         vm_page_wakeup(p);
326                         break;
327                 }
328                 mc[ib] = p;
329                 --ib;
330         }
331         ++ib;   /* fixup */
332
333         while (is < BLIST_MAX_ALLOC &&
334                pindex - page_base + is < object->size) {
335                 vm_page_t p;
336
337                 p = vm_page_lookup_busy_try(object, pindex - page_base + is,
338                                             TRUE, &error);
339                 if (error || p == NULL)
340                         break;
341                 if (((p->queue - p->pc) == PQ_CACHE) ||
342                     (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
343                         vm_page_wakeup(p);
344                         break;
345                 }
346                 vm_page_test_dirty(p);
347                 if (((p->dirty & p->valid) == 0 &&
348                      (p->flags & PG_NEED_COMMIT) == 0) ||
349                     p->queue - p->pc != PQ_INACTIVE ||
350                     p->wire_count != 0 ||       /* may be held by buf cache */
351                     p->hold_count != 0) {       /* may be undergoing I/O */
352                         vm_page_wakeup(p);
353                         break;
354                 }
355                 mc[is] = p;
356                 ++is;
357         }
358
359         vm_object_drop(object);
360
361         /*
362          * we allow reads during pageouts...
363          */
364         return vm_pageout_flush(&mc[ib], is - ib, 0);
365 }
366
367 /*
368  * vm_pageout_flush() - launder the given pages
369  *
370  *      The given pages are laundered.  Note that we setup for the start of
371  *      I/O ( i.e. busy the page ), mark it read-only, and bump the object
372  *      reference count all in here rather than in the parent.  If we want
373  *      the parent to do more sophisticated things we may have to change
374  *      the ordering.
375  *
376  *      The pages in the array must be busied by the caller and will be
377  *      unbusied by this function.
378  */
379 int
380 vm_pageout_flush(vm_page_t *mc, int count, int flags)
381 {
382         vm_object_t object;
383         int pageout_status[count];
384         int numpagedout = 0;
385         int i;
386
387         /*
388          * Initiate I/O.  Bump the vm_page_t->busy counter.
389          */
390         for (i = 0; i < count; i++) {
391                 KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
392                         ("vm_pageout_flush page %p index %d/%d: partially "
393                          "invalid page", mc[i], i, count));
394                 vm_page_io_start(mc[i]);
395         }
396
397         /*
398          * We must make the pages read-only.  This will also force the
399          * modified bit in the related pmaps to be cleared.  The pager
400          * cannot clear the bit for us since the I/O completion code
401          * typically runs from an interrupt.  The act of making the page
402          * read-only handles the case for us.
403          *
404          * Then we can unbusy the pages, we still hold a reference by virtue
405          * of our soft-busy.
406          */
407         for (i = 0; i < count; i++) {
408                 vm_page_protect(mc[i], VM_PROT_READ);
409                 vm_page_wakeup(mc[i]);
410         }
411
412         object = mc[0]->object;
413         vm_object_pip_add(object, count);
414
415         vm_pager_put_pages(object, mc, count,
416             (flags | ((object == &kernel_object) ? VM_PAGER_PUT_SYNC : 0)),
417             pageout_status);
418
419         for (i = 0; i < count; i++) {
420                 vm_page_t mt = mc[i];
421
422                 switch (pageout_status[i]) {
423                 case VM_PAGER_OK:
424                         numpagedout++;
425                         break;
426                 case VM_PAGER_PEND:
427                         numpagedout++;
428                         break;
429                 case VM_PAGER_BAD:
430                         /*
431                          * Page outside of range of object. Right now we
432                          * essentially lose the changes by pretending it
433                          * worked.
434                          */
435                         vm_page_busy_wait(mt, FALSE, "pgbad");
436                         pmap_clear_modify(mt);
437                         vm_page_undirty(mt);
438                         vm_page_wakeup(mt);
439                         break;
440                 case VM_PAGER_ERROR:
441                 case VM_PAGER_FAIL:
442                         /*
443                          * A page typically cannot be paged out when we
444                          * have run out of swap.  We leave the page
445                          * marked inactive and will try to page it out
446                          * again later.
447                          *
448                          * Starvation of the active page list is used to
449                          * determine when the system is massively memory
450                          * starved.
451                          */
452                         break;
453                 case VM_PAGER_AGAIN:
454                         break;
455                 }
456
457                 /*
458                  * If the operation is still going, leave the page busy to
459                  * block all other accesses. Also, leave the paging in
460                  * progress indicator set so that we don't attempt an object
461                  * collapse.
462                  *
463                  * For any pages which have completed synchronously, 
464                  * deactivate the page if we are under a severe deficit.
465                  * Do not try to enter them into the cache, though; they
466                  * might still be read-heavy.
467                  */
468                 if (pageout_status[i] != VM_PAGER_PEND) {
469                         vm_page_busy_wait(mt, FALSE, "pgouw");
470                         if (vm_page_count_severe())
471                                 vm_page_deactivate(mt);
472 #if 0
473                         if (!vm_page_count_severe() || !vm_page_try_to_cache(mt))
474                                 vm_page_protect(mt, VM_PROT_READ);
475 #endif
476                         vm_page_io_finish(mt);
477                         vm_page_wakeup(mt);
478                         vm_object_pip_wakeup(object);
479                 }
480         }
481         return numpagedout;
482 }
483
484 #if !defined(NO_SWAPPING)
485 /*
486  * Deactivate enough pages to satisfy the inactive target
487  * requirements or, if vm_page_proc_limit is set,
488  * deactivate all of the pages in the object and its
489  * backing_objects.
490  *
491  * The map must be locked.
492  * The caller must hold the vm_object.
493  */
494 static int vm_pageout_object_deactivate_pages_callback(vm_page_t, void *);
495
496 static void
497 vm_pageout_object_deactivate_pages(vm_map_t map, vm_object_t object,
498                                    vm_pindex_t desired, int map_remove_only)
499 {
500         struct rb_vm_page_scan_info info;
501         vm_object_t lobject;
502         vm_object_t tobject;
503         int remove_mode;
504
505         ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
506         lobject = object;
507
508         while (lobject) {
509                 if (pmap_resident_count(vm_map_pmap(map)) <= desired)
510                         break;
511                 if (lobject->type == OBJT_DEVICE ||
512                     lobject->type == OBJT_MGTDEVICE ||
513                     lobject->type == OBJT_PHYS)
514                         break;
515                 if (lobject->paging_in_progress)
516                         break;
517
518                 remove_mode = map_remove_only;
519                 if (lobject->shadow_count > 1)
520                         remove_mode = 1;
521
522                 /*
523          * scan the object's entire memory queue.  We hold the
524                  * object's token so the scan should not race anything.
525                  */
526                 info.limit = remove_mode;
527                 info.map = map;
528                 info.desired = desired;
529                 vm_page_rb_tree_RB_SCAN(&lobject->rb_memq, NULL,
530                                 vm_pageout_object_deactivate_pages_callback,
531                                 &info
532                 );
533                 while ((tobject = lobject->backing_object) != NULL) {
534                         KKASSERT(tobject != object);
535                         vm_object_hold(tobject);
536                         if (tobject == lobject->backing_object)
537                                 break;
538                         vm_object_drop(tobject);
539                 }
540                 if (lobject != object) {
541                         if (tobject)
542                                 vm_object_lock_swap();
543                         vm_object_drop(lobject);
544                         /* leaves tobject locked & at top */
545                 }
546                 lobject = tobject;
547         }
548         if (lobject != object)
549                 vm_object_drop(lobject);        /* NULL ok */
550 }
551
552 /*
553  * The caller must hold the vm_object.
554  */
555 static int
556 vm_pageout_object_deactivate_pages_callback(vm_page_t p, void *data)
557 {
558         struct rb_vm_page_scan_info *info = data;
559         int actcount;
560
561         if (pmap_resident_count(vm_map_pmap(info->map)) <= info->desired) {
562                 return(-1);
563         }
564         mycpu->gd_cnt.v_pdpages++;
565
566         if (vm_page_busy_try(p, TRUE))
567                 return(0);
568         if (p->wire_count || p->hold_count || (p->flags & PG_UNMANAGED)) {
569                 vm_page_wakeup(p);
570                 return(0);
571         }
572         if (!pmap_page_exists_quick(vm_map_pmap(info->map), p)) {
573                 vm_page_wakeup(p);
574                 return(0);
575         }
576
577         actcount = pmap_ts_referenced(p);
578         if (actcount) {
579                 vm_page_flag_set(p, PG_REFERENCED);
580         } else if (p->flags & PG_REFERENCED) {
581                 actcount = 1;
582         }
583
584         vm_page_and_queue_spin_lock(p);
585         if (p->queue - p->pc != PQ_ACTIVE && (p->flags & PG_REFERENCED)) {
586                 vm_page_and_queue_spin_unlock(p);
587                 vm_page_activate(p);
588                 p->act_count += actcount;
589                 vm_page_flag_clear(p, PG_REFERENCED);
590         } else if (p->queue - p->pc == PQ_ACTIVE) {
591                 if ((p->flags & PG_REFERENCED) == 0) {
592                         p->act_count -= min(p->act_count, ACT_DECLINE);
593                         if (!info->limit &&
594                             (vm_pageout_algorithm || (p->act_count == 0))) {
595                                 vm_page_and_queue_spin_unlock(p);
596                                 vm_page_protect(p, VM_PROT_NONE);
597                                 vm_page_deactivate(p);
598                         } else {
599                                 TAILQ_REMOVE(&vm_page_queues[p->queue].pl,
600                                              p, pageq);
601                                 TAILQ_INSERT_TAIL(&vm_page_queues[p->queue].pl,
602                                                   p, pageq);
603                                 vm_page_and_queue_spin_unlock(p);
604                         }
605                 } else {
606                         vm_page_and_queue_spin_unlock(p);
607                         vm_page_activate(p);
608                         vm_page_flag_clear(p, PG_REFERENCED);
609
610                         vm_page_and_queue_spin_lock(p);
611                         if (p->queue - p->pc == PQ_ACTIVE) {
612                                 if (p->act_count < (ACT_MAX - ACT_ADVANCE))
613                                         p->act_count += ACT_ADVANCE;
614                                 TAILQ_REMOVE(&vm_page_queues[p->queue].pl,
615                                              p, pageq);
616                                 TAILQ_INSERT_TAIL(&vm_page_queues[p->queue].pl,
617                                                   p, pageq);
618                         }
619                         vm_page_and_queue_spin_unlock(p);
620                 }
621         } else if (p->queue - p->pc == PQ_INACTIVE) {
622                 vm_page_and_queue_spin_unlock(p);
623                 vm_page_protect(p, VM_PROT_NONE);
624         } else {
625                 vm_page_and_queue_spin_unlock(p);
626         }
627         vm_page_wakeup(p);
628         return(0);
629 }
630
631 /*
632  * Deactivate some number of pages in a map; try to do it fairly, but
633  * that is really hard to do.
634  */
635 static void
636 vm_pageout_map_deactivate_pages(vm_map_t map, vm_pindex_t desired)
637 {
638         vm_map_entry_t tmpe;
639         vm_object_t obj, bigobj;
640         int nothingwired;
641
642         if (lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT)) {
643                 return;
644         }
645
646         bigobj = NULL;
647         nothingwired = TRUE;
648
649         /*
650          * first, search out the biggest object, and try to free pages from
651          * that.
652          */
653         tmpe = map->header.next;
654         while (tmpe != &map->header) {
655                 switch(tmpe->maptype) {
656                 case VM_MAPTYPE_NORMAL:
657                 case VM_MAPTYPE_VPAGETABLE:
658                         obj = tmpe->object.vm_object;
659                         if ((obj != NULL) && (obj->shadow_count <= 1) &&
660                                 ((bigobj == NULL) ||
661                                  (bigobj->resident_page_count < obj->resident_page_count))) {
662                                 bigobj = obj;
663                         }
664                         break;
665                 default:
666                         break;
667                 }
668                 if (tmpe->wired_count > 0)
669                         nothingwired = FALSE;
670                 tmpe = tmpe->next;
671         }
672
673         if (bigobj)  {
674                 vm_object_hold(bigobj);
675                 vm_pageout_object_deactivate_pages(map, bigobj, desired, 0);
676                 vm_object_drop(bigobj);
677         }
678
679         /*
680          * Next, hunt around for other pages to deactivate.  We actually
681          * do this search sort of wrong -- .text first is not the best idea.
682          */
683         tmpe = map->header.next;
684         while (tmpe != &map->header) {
685                 if (pmap_resident_count(vm_map_pmap(map)) <= desired)
686                         break;
687                 switch(tmpe->maptype) {
688                 case VM_MAPTYPE_NORMAL:
689                 case VM_MAPTYPE_VPAGETABLE:
690                         obj = tmpe->object.vm_object;
691                         if (obj) {
692                                 vm_object_hold(obj);
693                                 vm_pageout_object_deactivate_pages(map, obj, desired, 0);
694                                 vm_object_drop(obj);
695                         }
696                         break;
697                 default:
698                         break;
699                 }
700                 tmpe = tmpe->next;
701         }
702
703         /*
704          * Remove all mappings if a process is swapped out; this will free page
705          * table pages.
706          */
707         if (desired == 0 && nothingwired)
708                 pmap_remove(vm_map_pmap(map),
709                             VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
710         vm_map_unlock(map);
711 }
712 #endif
713
714 /*
715  * Called when the pageout scan wants to free a page.  We no longer
716  * try to cycle the vm_object here with a reference & dealloc, which can
717  * cause a non-trivial object collapse in a critical path.
718  *
719  * It is unclear why we cycled the ref_count in the past, perhaps to try
720  * to optimize shadow chain collapses but I don't quite see why it would
721  * be necessary.  An OBJ_DEAD object should terminate any and all vm_pages
722  * synchronously and not have to be kick-started.
723  */
724 static void
725 vm_pageout_page_free(vm_page_t m) 
726 {
727         vm_page_protect(m, VM_PROT_NONE);
728         vm_page_free(m);
729 }
730
731 /*
732  * vm_pageout_scan does the dirty work for the pageout daemon.
733  */
734 struct vm_pageout_scan_info {
735         struct proc *bigproc;
736         vm_offset_t bigsize;
737 };
738
739 static int vm_pageout_scan_callback(struct proc *p, void *data);
740
741 static int
742 vm_pageout_scan_inactive(int pass, int q, int avail_shortage,
743                          int *vnodes_skippedp)
744 {
745         vm_page_t m;
746         struct vm_page marker;
747         struct vnode *vpfailed;         /* warning, allowed to be stale */
748         int maxscan;
749         int count;
750         int delta = 0;
751         vm_object_t object;
752         int actcount;
753         int maxlaunder;
754
755         /*
756          * Start scanning the inactive queue for pages we can move to the
757          * cache or free.  The scan will stop when the target is reached or
758          * we have scanned the entire inactive queue.  Note that m->act_count
759          * is not used to form decisions for the inactive queue, only for the
760          * active queue.
761          *
762          * maxlaunder limits the number of dirty pages we flush per scan.
763          * For most systems a smaller value (16 or 32) is more robust under
764          * extreme memory and disk pressure because any unnecessary writes
765          * to disk can result in extreme performance degradation.  However,
766          * systems with excessive dirty pages (especially when MAP_NOSYNC is
767          * used) will die horribly with limited laundering.  If the pageout
768          * daemon cannot clean enough pages in the first pass, we let it go
769          * all out in succeeding passes.
770          */
771         if ((maxlaunder = vm_max_launder) <= 1)
772                 maxlaunder = 1;
773         if (pass)
774                 maxlaunder = 10000;
775
776         /*
777          * Initialize our marker
778          */
779         bzero(&marker, sizeof(marker));
780         marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
781         marker.queue = PQ_INACTIVE + q;
782         marker.pc = q;
783         marker.wire_count = 1;
784
785         /*
786          * Inactive queue scan.
787          *
788          * NOTE: The vm_page must be spinlocked before the queue to avoid
789          *       deadlocks, so it is easiest to simply iterate the loop
790          *       with the queue unlocked at the top.
791          */
792         vpfailed = NULL;
793
794         vm_page_queues_spin_lock(PQ_INACTIVE + q);
795         TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE + q].pl, &marker, pageq);
796         maxscan = vm_page_queues[PQ_INACTIVE + q].lcnt;
797
798         /*
799          * Queue locked at top of loop to avoid stack marker issues.
800          */
801         while ((m = TAILQ_NEXT(&marker, pageq)) != NULL &&
802                maxscan-- > 0 && avail_shortage - delta > 0)
803         {
804                 KKASSERT(m->queue - m->pc == PQ_INACTIVE);
805                 TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE + q].pl,
806                              &marker, pageq);
807                 TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE + q].pl, m,
808                                    &marker, pageq);
809                 mycpu->gd_cnt.v_pdpages++;
810
811                 /*
812                  * Skip marker pages (atomic against other markers to avoid
813                  * infinite hop-over scans).
814                  */
815                 if (m->flags & PG_MARKER)
816                         continue;
817
818                 /*
819                  * Try to busy the page.  Don't mess with pages which are
820                  * already busy or reorder them in the queue.
821                  */
822                 if (vm_page_busy_try(m, TRUE))
823                         continue;
824
825                 /*
826                  * Remaining operations run with the page busy and neither
827                  * the page or the queue will be spin-locked.
828                  */
829                 vm_page_queues_spin_unlock(PQ_INACTIVE + q);
830                 KKASSERT(m->queue - m->pc == PQ_INACTIVE);
831                 lwkt_yield();
832
833                 /*
834                  * It is possible for a page to be busied ad-hoc (e.g. the
835                  * pmap_collect() code) and wired and race against the
836                  * allocation of a new page.  vm_page_alloc() may be forced
837                  * to deactivate the wired page in which case it winds up
838                  * on the inactive queue and must be handled here.  We
839                  * correct the problem simply by unqueuing the page.
840                  */
841                 if (m->wire_count) {
842                         vm_page_unqueue_nowakeup(m);
843                         vm_page_wakeup(m);
844                         kprintf("WARNING: pagedaemon: wired page on "
845                                 "inactive queue %p\n", m);
846                         goto next;
847                 }
848
849                 /*
850                  * A held page may be undergoing I/O, so skip it.
851                  */
852                 if (m->hold_count) {
853                         vm_page_and_queue_spin_lock(m);
854                         if (m->queue - m->pc == PQ_INACTIVE) {
855                                 TAILQ_REMOVE(
856                                         &vm_page_queues[PQ_INACTIVE + q].pl,
857                                         m, pageq);
858                                 TAILQ_INSERT_TAIL(
859                                         &vm_page_queues[PQ_INACTIVE + q].pl,
860                                         m, pageq);
861                                 ++vm_swapcache_inactive_heuristic;
862                         }
863                         vm_page_and_queue_spin_unlock(m);
864                         vm_page_wakeup(m);
865                         goto next;
866                 }
867
868                 if (m->object == NULL || m->object->ref_count == 0) {
869                         /*
870                          * If the object is not being used, we ignore previous 
871                          * references.
872                          */
873                         vm_page_flag_clear(m, PG_REFERENCED);
874                         pmap_clear_reference(m);
875                         /* fall through to end */
876                 } else if (((m->flags & PG_REFERENCED) == 0) &&
877                             (actcount = pmap_ts_referenced(m))) {
878                         /*
879                          * Otherwise, if the page has been referenced while 
880                          * in the inactive queue, we bump the "activation
881                          * count" upwards, making it less likely that the
882                          * page will be added back to the inactive queue
883                          * prematurely again.  Here we check the page tables
884                          * (or emulated bits, if any), since the upper level
885                          * VM system knows nothing about existing
886                          * references.
887                          */
888                         vm_page_activate(m);
889                         m->act_count += (actcount + ACT_ADVANCE);
890                         vm_page_wakeup(m);
891                         goto next;
892                 }
893
894                 /*
895                  * (m) is still busied.
896                  *
897                  * If the upper level VM system knows about any page 
898                  * references, we activate the page.  We also set the 
899                  * "activation count" higher than normal so that we are less
900                  * likely to place pages back onto the inactive queue again.
901                  */
902                 if ((m->flags & PG_REFERENCED) != 0) {
903                         vm_page_flag_clear(m, PG_REFERENCED);
904                         actcount = pmap_ts_referenced(m);
905                         vm_page_activate(m);
906                         m->act_count += (actcount + ACT_ADVANCE + 1);
907                         vm_page_wakeup(m);
908                         goto next;
909                 }
910
911                 /*
912                  * If the upper level VM system doesn't know anything about 
913                  * the page being dirty, we have to check for it again.  As 
914                  * far as the VM code knows, any partially dirty pages are 
915                  * fully dirty.
916                  *
917                  * Pages marked PG_WRITEABLE may be mapped into the user
918                  * address space of a process running on another cpu.  A
919                  * user process (without holding the MP lock) running on
920                  * another cpu may be able to touch the page while we are
921                  * trying to remove it.  vm_page_cache() will handle this
922                  * case for us.
923                  */
924                 if (m->dirty == 0) {
925                         vm_page_test_dirty(m);
926                 } else {
927                         vm_page_dirty(m);
928                 }
929
930                 if (m->valid == 0 && (m->flags & PG_NEED_COMMIT) == 0) {
931                         /*
932                          * Invalid pages can be easily freed
933                          */
934                         vm_pageout_page_free(m);
935                         mycpu->gd_cnt.v_dfree++;
936                         ++delta;
937                 } else if (m->dirty == 0 && (m->flags & PG_NEED_COMMIT) == 0) {
938                         /*
939                          * Clean pages can be placed onto the cache queue.
940                          * This effectively frees them.
941                          */
942                         vm_page_cache(m);
943                         ++delta;
944                 } else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) {
945                         /*
946                          * Dirty pages need to be paged out, but flushing
947                          * a page is extremely expensive versus freeing
948                          * a clean page.  Rather than artificially limiting
949                          * the number of pages we can flush, we instead give
950                          * dirty pages extra priority on the inactive queue
951                          * by forcing them to be cycled through the queue
952                          * twice before being flushed, after which the 
953                          * (now clean) page will cycle through once more
954                          * before being freed.  This significantly extends
955                          * the thrash point for a heavily loaded machine.
956                          */
957                         vm_page_flag_set(m, PG_WINATCFLS);
958                         vm_page_and_queue_spin_lock(m);
959                         if (m->queue - m->pc == PQ_INACTIVE) {
960                                 TAILQ_REMOVE(
961                                         &vm_page_queues[PQ_INACTIVE + q].pl,
962                                         m, pageq);
963                                 TAILQ_INSERT_TAIL(
964                                         &vm_page_queues[PQ_INACTIVE + q].pl,
965                                         m, pageq);
966                                 ++vm_swapcache_inactive_heuristic;
967                         }
968                         vm_page_and_queue_spin_unlock(m);
969                         vm_page_wakeup(m);
970                 } else if (maxlaunder > 0) {
971                         /*
972                          * We always want to try to flush some dirty pages if
973                          * we encounter them, to keep the system stable.
974                          * Normally this number is small, but under extreme
975                          * pressure where there are insufficient clean pages
976                          * on the inactive queue, we may have to go all out.
977                          */
978                         int swap_pageouts_ok;
979                         struct vnode *vp = NULL;
980
981                         swap_pageouts_ok = 0;
982                         object = m->object;
983                         if (object &&
984                             (object->type != OBJT_SWAP) && 
985                             (object->type != OBJT_DEFAULT)) {
986                                 swap_pageouts_ok = 1;
987                         } else {
988                                 swap_pageouts_ok =
989                                         !(defer_swap_pageouts || disable_swap_pageouts);
990                                 swap_pageouts_ok |= (!disable_swap_pageouts &&
991                                         defer_swap_pageouts && vm_page_count_min(0));
992                         }
993
994                         /*
995                          * We don't bother paging objects that are "dead".  
996                          * Those objects are in a "rundown" state.
997                          */
998                         if (!swap_pageouts_ok || 
999                             (object == NULL) ||
1000                             (object->flags & OBJ_DEAD)) {
1001                                 vm_page_and_queue_spin_lock(m);
1002                                 if (m->queue - m->pc == PQ_INACTIVE) {
1003                                         TAILQ_REMOVE(
1004                                             &vm_page_queues[PQ_INACTIVE + q].pl,
1005                                             m, pageq);
1006                                         TAILQ_INSERT_TAIL(
1007                                             &vm_page_queues[PQ_INACTIVE + q].pl,
1008                                             m, pageq);
1009                                         ++vm_swapcache_inactive_heuristic;
1010                                 }
1011                                 vm_page_and_queue_spin_unlock(m);
1012                                 vm_page_wakeup(m);
1013                                 goto next;
1014                         }
1015
1016                         /*
1017                          * (m) is still busied.
1018                          *
1019                          * The object is already known NOT to be dead.   It
1020                          * is possible for the vget() to block the whole
1021                          * pageout daemon, but the new low-memory handling
1022                          * code should prevent it.
1023                          *
1024                          * The previous code skipped locked vnodes and, worse,
1025                          * reordered pages in the queue.  This results in
1026                          * completely non-deterministic operation because,
1027                          * quite often, a vm_fault has initiated an I/O and
1028                          * is holding a locked vnode at just the point where
1029                          * the pageout daemon is woken up.
1030                          *
1031                          * We can't wait forever for the vnode lock; we might
1032                          * deadlock due to a vn_read() getting stuck in
1033                          * vm_wait while holding this vnode.  We skip the 
1034                          * vnode if we can't get it in a reasonable amount
1035                          * of time.
1036                          *
1037                          * vpfailed is used to (try to) avoid the case where
1038                          * a large number of pages are associated with a
1039                          * locked vnode, which could cause the pageout daemon
1040                          * to stall for an excessive amount of time.
1041                          */
1042                         if (object->type == OBJT_VNODE) {
1043                                 int flags;
1044
1045                                 vp = object->handle;
1046                                 flags = LK_EXCLUSIVE;
1047                                 if (vp == vpfailed)
1048                                         flags |= LK_NOWAIT;
1049                                 else
1050                                         flags |= LK_TIMELOCK;
1051                                 vm_page_hold(m);
1052                                 vm_page_wakeup(m);
1053
1054                                 /*
1055                                  * We have unbusied (m) temporarily so we can
1056                                  * acquire the vp lock without deadlocking.
1057                                  * (m) is held to prevent destruction.
1058                                  */
1059                                 if (vget(vp, flags) != 0) {
1060                                         vpfailed = vp;
1061                                         ++pageout_lock_miss;
1062                                         if (object->flags & OBJ_MIGHTBEDIRTY)
1063                                                     ++*vnodes_skippedp;
1064                                         vm_page_unhold(m);
1065                                         goto next;
1066                                 }
1067
1068                                 /*
1069                                  * The page might have been moved to another
1070                                  * queue during potential blocking in vget()
1071                                  * above.  The page might have been freed and
1072                                  * reused for another vnode.  The object might
1073                                  * have been reused for another vnode.
1074                                  */
1075                                 if (m->queue - m->pc != PQ_INACTIVE ||
1076                                     m->object != object ||
1077                                     object->handle != vp) {
1078                                         if (object->flags & OBJ_MIGHTBEDIRTY)
1079                                                 ++*vnodes_skippedp;
1080                                         vput(vp);
1081                                         vm_page_unhold(m);
1082                                         goto next;
1083                                 }
1084         
1085                                 /*
1086                                  * The page may have been busied during the
1087                                  * blocking in vget().  We don't move the
1088                                  * page back onto the end of the queue;
1089                                  * statistics are more correct if we don't.
1090                                  */
1091                                 if (vm_page_busy_try(m, TRUE)) {
1092                                         vput(vp);
1093                                         vm_page_unhold(m);
1094                                         goto next;
1095                                 }
1096                                 vm_page_unhold(m);
1097
1098                                 /*
1099                                  * (m) is busied again
1100                                  *
1101                                  * We own the busy bit and remove our hold
1102                                  * bit.  If the page is still held it
1103                                  * might be undergoing I/O, so skip it.
1104                                  */
1105                                 if (m->hold_count) {
1106                                         vm_page_and_queue_spin_lock(m);
1107                                         if (m->queue - m->pc == PQ_INACTIVE) {
1108                                                 TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE + q].pl, m, pageq);
1109                                                 TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE + q].pl, m, pageq);
1110                                                 ++vm_swapcache_inactive_heuristic;
1111                                         }
1112                                         vm_page_and_queue_spin_unlock(m);
1113                                         if (object->flags & OBJ_MIGHTBEDIRTY)
1114                                                 ++*vnodes_skippedp;
1115                                         vm_page_wakeup(m);
1116                                         vput(vp);
1117                                         goto next;
1118                                 }
1119                                 /* (m) is left busied as we fall through */
1120                         }
1121
1122                         /*
1123                          * page is busy and not held here.
1124                          *
1125                          * If a page is dirty, then it is either being washed
1126                          * (but not yet cleaned) or it is still in the
1127                          * laundry.  If it is still in the laundry, then we
1128                          * start the cleaning operation. 
1129                          *
1130                          * decrement inactive_shortage on success to account
1131                          * for the (future) cleaned page.  Otherwise we
1132                          * could wind up laundering or cleaning too many
1133                          * pages.
1134                          */
1135                         count = vm_pageout_clean(m);
1136                         delta += count;
1137                         maxlaunder -= count;
1138
1139                         /*
1140                          * vm_pageout_clean() ate the busy; page no longer accessible
1141                          */
1142                         if (vp != NULL)
1143                                 vput(vp);
1144                 } else {
1145                         vm_page_wakeup(m);
1146                 }
1147
1148 next:
1149                 /*
1150                  * Systems with a ton of memory can wind up with huge
1151                  * deactivation counts.  Because the inactive scan is
1152                  * doing a lot of flushing, the combination can result
1153                  * in excessive paging even in situations where other
1154                  * unrelated threads free up sufficient VM.
1155                  *
1156                  * To deal with this we abort the nominal active->inactive
1157                  * scan before we hit the inactive target when free+cache
1158                  * levels have reached a reasonable target.
1159                  *
1160                  * When deciding to stop early we need to add some slop to
1161                  * the test and we need to return full completion to the caller
1162                  * to prevent the caller from thinking there is something
1163                  * wrong and issuing a low-memory+swap warning or pkill.
1164                  */
1165                 vm_page_queues_spin_lock(PQ_INACTIVE + q);
1166                 if (vm_paging_target() < -vm_max_launder) {
1167                         /*
1168                          * Stopping early, return full completion to caller.
1169                          */
1170                         if (delta < avail_shortage)
1171                                 delta = avail_shortage;
1172                         break;
1173                 }
1174         }
1175
1176         /* page queue still spin-locked */
1177         TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE + q].pl, &marker, pageq);
1178         vm_page_queues_spin_unlock(PQ_INACTIVE + q);
1179
1180         return (delta);
1181 }
1182
1183 static int
1184 vm_pageout_scan_active(int pass, int q,
1185                        int avail_shortage, int inactive_shortage,
1186                        int *recycle_countp)
1187 {
1188         struct vm_page marker;
1189         vm_page_t m;
1190         int actcount;
1191         int delta = 0;
1192         int maxscan;
1193
1194         /*
1195          * We want to move pages from the active queue to the inactive
1196          * queue to get the inactive queue to the inactive target.  If
1197          * we still have a page shortage from above we try to directly free
1198          * clean pages instead of moving them.
1199          *
1200          * If we do still have a shortage we keep track of the number of
1201          * pages we free or cache (recycle_count) as a measure of thrashing
1202          * between the active and inactive queues.
1203          *
1204          * If we were able to completely satisfy the free+cache targets
1205          * from the inactive pool we limit the number of pages we move
1206          * from the active pool to the inactive pool to 2x the pages we
1207          * had removed from the inactive pool (with a minimum of 1/5 the
1208          * inactive target).  If we were not able to completely satisfy
1209          * the free+cache targets we go for the whole target aggressively.
1210          *
1211          * NOTE: Both variables can end up negative.
1212          * NOTE: We are still in a critical section.
1213          */
1214
1215         bzero(&marker, sizeof(marker));
1216         marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
1217         marker.queue = PQ_ACTIVE + q;
1218         marker.pc = q;
1219         marker.wire_count = 1;
1220
1221         vm_page_queues_spin_lock(PQ_ACTIVE + q);
1222         TAILQ_INSERT_HEAD(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1223         maxscan = vm_page_queues[PQ_ACTIVE + q].lcnt;
1224
1225         /*
1226          * Queue locked at top of loop to avoid stack marker issues.
1227          */
1228         while ((m = TAILQ_NEXT(&marker, pageq)) != NULL &&
1229                maxscan-- > 0 && (avail_shortage - delta > 0 ||
1230                                 inactive_shortage > 0))
1231         {
1232                 KKASSERT(m->queue - m->pc == PQ_ACTIVE);
1233                 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl,
1234                              &marker, pageq);
1235                 TAILQ_INSERT_AFTER(&vm_page_queues[PQ_ACTIVE + q].pl, m,
1236                                    &marker, pageq);
1237
1238                 /*
1239                  * Skip marker pages (atomic against other markers to avoid
1240                  * infinite hop-over scans).
1241                  */
1242                 if (m->flags & PG_MARKER)
1243                         continue;
1244
1245                 /*
1246                  * Try to busy the page.  Don't mess with pages which are
1247                  * already busy or reorder them in the queue.
1248                  */
1249                 if (vm_page_busy_try(m, TRUE))
1250                         continue;
1251
1252                 /*
1253                  * Remaining operations run with the page busy and neither
1254                  * the page or the queue will be spin-locked.
1255                  */
1256                 vm_page_queues_spin_unlock(PQ_ACTIVE + q);
1257                 KKASSERT(m->queue - m->pc == PQ_ACTIVE);
1258                 lwkt_yield();
1259
1260                 /*
1261                  * Don't deactivate pages that are held, even if we can
1262                  * busy them.  (XXX why not?)
1263                  */
1264                 if (m->hold_count != 0) {
1265                         vm_page_and_queue_spin_lock(m);
1266                         if (m->queue - m->pc == PQ_ACTIVE) {
1267                                 TAILQ_REMOVE(
1268                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1269                                         m, pageq);
1270                                 TAILQ_INSERT_TAIL(
1271                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1272                                         m, pageq);
1273                         }
1274                         vm_page_and_queue_spin_unlock(m);
1275                         vm_page_wakeup(m);
1276                         goto next;
1277                 }
1278
1279                 /*
1280                  * The count for pagedaemon pages is done after checking the
1281                  * page for eligibility...
1282                  */
1283                 mycpu->gd_cnt.v_pdpages++;
1284
1285                 /*
1286                  * Check to see "how much" the page has been used and clear
1287                  * the tracking access bits.  If the object has no references
1288                  * don't bother paying the expense.
1289                  */
1290                 actcount = 0;
1291                 if (m->object && m->object->ref_count != 0) {
1292                         if (m->flags & PG_REFERENCED)
1293                                 ++actcount;
1294                         actcount += pmap_ts_referenced(m);
1295                         if (actcount) {
1296                                 m->act_count += ACT_ADVANCE + actcount;
1297                                 if (m->act_count > ACT_MAX)
1298                                         m->act_count = ACT_MAX;
1299                         }
1300                 }
1301                 vm_page_flag_clear(m, PG_REFERENCED);
1302
1303                 /*
1304                  * actcount is only valid if the object ref_count is non-zero.
1305                  * If the page does not have an object, actcount will be zero.
1306                  */
1307                 if (actcount && m->object->ref_count != 0) {
1308                         vm_page_and_queue_spin_lock(m);
1309                         if (m->queue - m->pc == PQ_ACTIVE) {
1310                                 TAILQ_REMOVE(
1311                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1312                                         m, pageq);
1313                                 TAILQ_INSERT_TAIL(
1314                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1315                                         m, pageq);
1316                         }
1317                         vm_page_and_queue_spin_unlock(m);
1318                         vm_page_wakeup(m);
1319                 } else {
1320                         switch(m->object->type) {
1321                         case OBJT_DEFAULT:
1322                         case OBJT_SWAP:
1323                                 m->act_count -= min(m->act_count,
1324                                                     vm_anonmem_decline);
1325                                 break;
1326                         default:
1327                                 m->act_count -= min(m->act_count,
1328                                                     vm_filemem_decline);
1329                                 break;
1330                         }
1331                         if (vm_pageout_algorithm ||
1332                             (m->object == NULL) ||
1333                             (m->object && (m->object->ref_count == 0)) ||
1334                             m->act_count < pass + 1
1335                         ) {
1336                                 /*
1337                                  * Deactivate the page.  If we had a
1338                                  * shortage from our inactive scan try to
1339                                  * free (cache) the page instead.
1340                                  *
1341                                  * Don't just blindly cache the page if
1342                                  * we do not have a shortage from the
1343                                  * inactive scan; that could lead to
1344                                  * gigabytes being moved.
1345                                  */
1346                                 --inactive_shortage;
1347                                 if (avail_shortage - delta > 0 ||
1348                                     (m->object && (m->object->ref_count == 0)))
1349                                 {
1350                                         if (avail_shortage - delta > 0)
1351                                                 ++*recycle_countp;
1352                                         vm_page_protect(m, VM_PROT_NONE);
1353                                         if (m->dirty == 0 &&
1354                                             (m->flags & PG_NEED_COMMIT) == 0 &&
1355                                             avail_shortage - delta > 0) {
1356                                                 vm_page_cache(m);
1357                                         } else {
1358                                                 vm_page_deactivate(m);
1359                                                 vm_page_wakeup(m);
1360                                         }
1361                                 } else {
1362                                         vm_page_deactivate(m);
1363                                         vm_page_wakeup(m);
1364                                 }
1365                                 ++delta;
1366                         } else {
1367                                 vm_page_and_queue_spin_lock(m);
1368                                 if (m->queue - m->pc == PQ_ACTIVE) {
1369                                         TAILQ_REMOVE(
1370                                             &vm_page_queues[PQ_ACTIVE + q].pl,
1371                                             m, pageq);
1372                                         TAILQ_INSERT_TAIL(
1373                                             &vm_page_queues[PQ_ACTIVE + q].pl,
1374                                             m, pageq);
1375                                 }
1376                                 vm_page_and_queue_spin_unlock(m);
1377                                 vm_page_wakeup(m);
1378                         }
1379                 }
1380 next:
1381                 vm_page_queues_spin_lock(PQ_ACTIVE + q);
1382         }
1383
1384         /*
1385          * Clean out our local marker.
1386          *
1387          * Page queue still spin-locked.
1388          */
1389         TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1390         vm_page_queues_spin_unlock(PQ_ACTIVE + q);
1391
1392         return (delta);
1393 }
1394
1395 /*
1396  * The number of actually free pages can drop down to v_free_reserved,
1397  * so we try to build the free count back above v_free_min.  Note that
1398  * vm_paging_needed() also returns TRUE if v_free_count is not at
1399  * least v_free_min, so that is the minimum we must build the free
1400  * count to.
1401  *
1402  * We use a slightly higher target to improve hysteresis,
1403  * ((v_free_target + v_free_min) / 2).  Since v_free_target
1404  * is usually the same as v_cache_min this maintains about
1405  * half the pages in the free queue as are in the cache queue,
1406  * providing pretty good pipelining for pageout operation.
1407  *
1408  * The system operator can manipulate vm.v_cache_min and
1409  * vm.v_free_target to tune the pageout daemon.  Be sure
1410  * to keep vm.v_free_min < vm.v_free_target.
1411  *
1412  * Note that the original paging target is to get at least
1413  * (free_min + cache_min) into (free + cache).  The slightly
1414  * higher target will shift additional pages from cache to free
1415  * without affecting the original paging target in order to
1416  * maintain better hysteresis and not have the free count always
1417  * be dead-on v_free_min.
1418  *
1419  * NOTE: we are still in a critical section.
1420  *
1421  * Pages moved from PQ_CACHE to totally free are not counted in the
1422  * pages_freed counter.
1423  */
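/*
 * Worked example (illustrative numbers only; actual values depend on
 * v_page_count and tuning): if v_free_min were ~5000 pages and
 * v_free_target ~24000 pages, the cache-freeing loop below would pull
 * pages out of PQ_CACHE until v_free_count reached roughly
 * (5000 + 24000) / 2 = 14500 pages, about 57MB with 4K pages.
 */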
1424 static void
1425 vm_pageout_scan_cache(int avail_shortage, int pass,
1426                       int vnodes_skipped, int recycle_count)
1427 {
1428         static int lastkillticks;
1429         struct vm_pageout_scan_info info;
1430         vm_page_t m;
1431
1432         while (vmstats.v_free_count <
1433                (vmstats.v_free_min + vmstats.v_free_target) / 2) {
1434                 /*
1435                  * This steals some code from vm/vm_page.c
1436                  */
1437                 static int cache_rover = 0;
1438
1439                 m = vm_page_list_find(PQ_CACHE,
1440                                       cache_rover & PQ_L2_MASK, FALSE);
1441                 if (m == NULL)
1442                         break;
1443                 /* page is returned removed from its queue and spinlocked */
1444                 if (vm_page_busy_try(m, TRUE)) {
1445                         vm_page_deactivate_locked(m);
1446                         vm_page_spin_unlock(m);
1447                         continue;
1448                 }
1449                 vm_page_spin_unlock(m);
1450                 pagedaemon_wakeup();
1451                 lwkt_yield();
1452
1453                 /*
1454                  * Remaining operations run with the page busy and neither
1455          * the page nor the queue will be spin-locked.
1456                  */
1457                 if ((m->flags & (PG_UNMANAGED | PG_NEED_COMMIT)) ||
1458                     m->hold_count ||
1459                     m->wire_count) {
1460                         vm_page_deactivate(m);
1461                         vm_page_wakeup(m);
1462                         continue;
1463                 }
1464                 KKASSERT((m->flags & PG_MAPPED) == 0);
1465                 KKASSERT(m->dirty == 0);
1466                 cache_rover += PQ_PRIME2;
1467                 vm_pageout_page_free(m);
1468                 mycpu->gd_cnt.v_dfree++;
1469         }
1470
1471 #if !defined(NO_SWAPPING)
1472         /*
1473          * Idle process swapout -- run once per second.
1474          */
1475         if (vm_swap_idle_enabled) {
1476                 static time_t lsec;
1477                 if (time_uptime != lsec) {
1478                         vm_pageout_req_swapout |= VM_SWAP_IDLE;
1479                         vm_req_vmdaemon();
1480                         lsec = time_uptime;
1481                 }
1482         }
1483 #endif
1484
1485         /*
1486          * If we didn't get enough free pages and we have skipped a vnode
1487          * in a writeable object, wake up the sync daemon.  Also kick off
1488          * swapout if we still fall short of the paging target.
1489          */
1490         if (vm_paging_target() > 0) {
1491                 if (vnodes_skipped && vm_page_count_min(0))
1492                         speedup_syncer(NULL);
1493 #if !defined(NO_SWAPPING)
1494                 if (vm_swap_enabled && vm_page_count_target()) {
1495                         vm_req_vmdaemon();
1496                         vm_pageout_req_swapout |= VM_SWAP_NORMAL;
1497                 }
1498 #endif
1499         }
1500
1501         /*
1502          * Handle catastrophic conditions.  Under good conditions we should
1503          * be at the target, well beyond our minimum.  If we could not even
1504          * reach our minimum the system is under heavy stress.  But just being
1505          * under heavy stress does not trigger process killing.
1506          *
1507          * We consider ourselves to have run out of memory if the swap pager
1508          * is full and avail_shortage is still positive.  The secondary check
1509          * ensures that we do not kill processes if the instantaneous
1510          * availability is good, even if the pageout daemon pass says it
1511          * couldn't get to the target.
1512          */
1513         if (swap_pager_almost_full &&
1514             pass > 0 &&
1515             (vm_page_count_min(recycle_count) || avail_shortage > 0)) {
1516                 kprintf("Warning: system low on memory+swap "
1517                         "shortage %d for %d ticks!\n",
1518                         avail_shortage, ticks - swap_fail_ticks);
1519         }
1520         if (swap_pager_full &&
1521             pass > 1 &&
1522             avail_shortage > 0 &&
1523             vm_paging_target() > 0 &&
1524             (unsigned int)(ticks - lastkillticks) >= hz) {
1525                 /*
1526                  * Kill something, maximum rate once per second to give
1527                  * the process time to free up sufficient memory.
1528                  */
1529                 lastkillticks = ticks;
1530                 info.bigproc = NULL;
1531                 info.bigsize = 0;
1532                 allproc_scan(vm_pageout_scan_callback, &info);
1533                 if (info.bigproc != NULL) {
1534                         info.bigproc->p_nice = PRIO_MIN;
1535                         info.bigproc->p_usched->resetpriority(
1536                                 FIRST_LWP_IN_PROC(info.bigproc));
1537                         killproc(info.bigproc, "out of swap space");
1538                         wakeup(&vmstats.v_free_count);
1539                         PRELE(info.bigproc);
1540                 }
1541         }
1542 }
1543
1544 static int
1545 vm_pageout_scan_callback(struct proc *p, void *data)
1546 {
1547         struct vm_pageout_scan_info *info = data;
1548         vm_offset_t size;
1549
1550         /*
1551          * Never kill system processes or init.  If we have configured swap
1552          * then try to avoid killing low-numbered pids.
1553          */
1554         if ((p->p_flags & P_SYSTEM) || (p->p_pid == 1) ||
1555             ((p->p_pid < 48) && (vm_swap_size != 0))) {
1556                 return (0);
1557         }
1558
1559         lwkt_gettoken(&p->p_token);
1560
1561         /*
1562          * if the process is in a non-running type state,
1563          * don't touch it.
1564          */
1565         if (p->p_stat != SACTIVE && p->p_stat != SSTOP && p->p_stat != SCORE) {
1566                 lwkt_reltoken(&p->p_token);
1567                 return (0);
1568         }
1569
1570         /*
1571          * Get the approximate process size.  Note that anonymous pages
1572          * with backing swap will be counted twice, but there should not
1573          * be too many such pages due to the stress the VM system is
1574          * under at this point.
1575          */
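        /*
         * Illustrative only: a process with 100MB of anonymous memory,
         * 40MB of which also sits in swap, would score roughly 140MB
         * worth of pages here because of the double counting noted above.
         */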
1576         size = vmspace_anonymous_count(p->p_vmspace) +
1577                 vmspace_swap_count(p->p_vmspace);
1578
1579         /*
1580          * If this process is bigger than the biggest one seen so far,
1581          * remember it.
1582          */
1583         if (info->bigsize < size) {
1584                 if (info->bigproc)
1585                         PRELE(info->bigproc);
1586                 PHOLD(p);
1587                 info->bigproc = p;
1588                 info->bigsize = size;
1589         }
1590         lwkt_reltoken(&p->p_token);
1591         lwkt_yield();
1592
1593         return(0);
1594 }
1595
1596 /*
1597  * This routine tries to maintain the pseudo LRU active queue,
1598  * so that during long periods of time when there is no paging,
1599  * some statistic accumulation still occurs.  This code
1600  * helps the situation where paging just starts to occur.
1601  */
1602 static void
1603 vm_pageout_page_stats(int q)
1604 {
1605         static int fullintervalcount = 0;
1606         struct vm_page marker;
1607         vm_page_t m;
1608         int pcount, tpcount;            /* Number of pages to check */
1609         int page_shortage;
1610
1611         page_shortage = (vmstats.v_inactive_target + vmstats.v_cache_max +
1612                          vmstats.v_free_min) -
1613                         (vmstats.v_free_count + vmstats.v_inactive_count +
1614                          vmstats.v_cache_count);
1615
1616         if (page_shortage <= 0)
1617                 return;
1618
1619         pcount = vm_page_queues[PQ_ACTIVE + q].lcnt;
1620         fullintervalcount += vm_pageout_stats_interval;
1621         if (fullintervalcount < vm_pageout_full_stats_interval) {
1622                 tpcount = (vm_pageout_stats_max * pcount) /
1623                           vmstats.v_page_count + 1;
1624                 if (pcount > tpcount)
1625                         pcount = tpcount;
1626         } else {
1627                 fullintervalcount = 0;
1628         }
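        /*
         * Illustrative numbers only: with vm_pageout_stats_max = 24000,
         * an active queue holding 50000 pages, and ~1M total pages, the
         * partial-scan clip above limits this pass to roughly 1200 pages.
         */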
1629
1630         bzero(&marker, sizeof(marker));
1631         marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
1632         marker.queue = PQ_ACTIVE + q;
1633         marker.pc = q;
1634         marker.wire_count = 1;
1635
1636         vm_page_queues_spin_lock(PQ_ACTIVE + q);
1637         TAILQ_INSERT_HEAD(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1638
1639         /*
1640          * Queue locked at top of loop to avoid stack marker issues.
1641          */
1642         while ((m = TAILQ_NEXT(&marker, pageq)) != NULL &&
1643                pcount-- > 0)
1644         {
1645                 int actcount;
1646
1647                 KKASSERT(m->queue - m->pc == PQ_ACTIVE);
1648                 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1649                 TAILQ_INSERT_AFTER(&vm_page_queues[PQ_ACTIVE + q].pl, m,
1650                                    &marker, pageq);
1651
1652                 /*
1653                  * Skip marker pages (atomic against other markers to avoid
1654                  * infinite hop-over scans).
1655                  */
1656                 if (m->flags & PG_MARKER)
1657                         continue;
1658
1659                 /*
1660                  * Ignore pages we can't busy
1661                  */
1662                 if (vm_page_busy_try(m, TRUE))
1663                         continue;
1664
1665                 /*
1666                  * Remaining operations run with the page busy and neither
1667          * the page nor the queue will be spin-locked.
1668                  */
1669                 vm_page_queues_spin_unlock(PQ_ACTIVE + q);
1670                 KKASSERT(m->queue - m->pc == PQ_ACTIVE);
1671
1672                 /*
1673                  * We now have a safely busied page, the page and queue
1674                  * spinlocks have been released.
1675                  *
1676                  * Ignore held pages
1677                  */
1678                 if (m->hold_count) {
1679                         vm_page_wakeup(m);
1680                         goto next;
1681                 }
1682
1683                 /*
1684                  * Calculate activity
1685                  */
1686                 actcount = 0;
1687                 if (m->flags & PG_REFERENCED) {
1688                         vm_page_flag_clear(m, PG_REFERENCED);
1689                         actcount += 1;
1690                 }
1691                 actcount += pmap_ts_referenced(m);
1692
1693                 /*
1694                  * Update act_count and move page to end of queue.
1695                  */
1696                 if (actcount) {
1697                         m->act_count += ACT_ADVANCE + actcount;
1698                         if (m->act_count > ACT_MAX)
1699                                 m->act_count = ACT_MAX;
1700                         vm_page_and_queue_spin_lock(m);
1701                         if (m->queue - m->pc == PQ_ACTIVE) {
1702                                 TAILQ_REMOVE(
1703                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1704                                         m, pageq);
1705                                 TAILQ_INSERT_TAIL(
1706                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1707                                         m, pageq);
1708                         }
1709                         vm_page_and_queue_spin_unlock(m);
1710                         vm_page_wakeup(m);
1711                         goto next;
1712                 }
1713
1714                 if (m->act_count == 0) {
1715                         /*
1716                          * We turn off page access, so that we have
1717                          * more accurate RSS stats.  We don't do this
1718                          * in the normal page deactivation when the
1719                          * system is loaded VM wise, because the
1720                          * cost of the large number of page protect
1721                          * operations would be higher than the value
1722                          * of doing the operation.
1723                          *
1724                          * We use the marker to save our place so
1725                          * we can release the spin lock.  Both (m)
1726                          * and (next) will be invalid.
1727                          */
1728                         vm_page_protect(m, VM_PROT_NONE);
1729                         vm_page_deactivate(m);
1730                 } else {
1731                         m->act_count -= min(m->act_count, ACT_DECLINE);
1732                         vm_page_and_queue_spin_lock(m);
1733                         if (m->queue - m->pc == PQ_ACTIVE) {
1734                                 TAILQ_REMOVE(
1735                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1736                                         m, pageq);
1737                                 TAILQ_INSERT_TAIL(
1738                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1739                                         m, pageq);
1740                         }
1741                         vm_page_and_queue_spin_unlock(m);
1742                 }
1743                 vm_page_wakeup(m);
1744 next:
1745                 vm_page_queues_spin_lock(PQ_ACTIVE + q);
1746         }
1747
1748         /*
1749          * Remove our local marker
1750          *
1751          * Page queue still spin-locked.
1752          */
1753         TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1754         vm_page_queues_spin_unlock(PQ_ACTIVE + q);
1755 }
1756
1757 static int
1758 vm_pageout_free_page_calc(vm_size_t count)
1759 {
1760         if (count < vmstats.v_page_count)
1761                  return 0;
1762         /*
1763          * free_reserved needs to include enough for the largest swap pager
1764          * structures plus enough for any pv_entry structs when paging.
1765          *
1766          * v_free_min           normal allocations
1767          * v_free_reserved      system allocations
1768          * v_pageout_free_min   allocations by pageout daemon
1769          * v_interrupt_free_min low level allocations (e.g. swap structures)
1770          */
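        /*
         * Worked example (illustrative only): with roughly 1M physical
         * pages (4GB of 4K pages) the formulas below come out to about
         * v_free_min ~5300 pages (~21MB), v_free_reserved ~2657,
         * v_pageout_free_min ~1332 and v_interrupt_free_min ~669.
         */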
1771         if (vmstats.v_page_count > 1024)
1772                 vmstats.v_free_min = 64 + (vmstats.v_page_count - 1024) / 200;
1773         else
1774                 vmstats.v_free_min = 64;
1775         vmstats.v_free_reserved = vmstats.v_free_min * 4 / 8 + 7;
1776         vmstats.v_free_severe = vmstats.v_free_min * 4 / 8 + 0;
1777         vmstats.v_pageout_free_min = vmstats.v_free_min * 2 / 8 + 7;
1778         vmstats.v_interrupt_free_min = vmstats.v_free_min * 1 / 8 + 7;
1779
1780         return 1;
1781 }
1782
1783
1784 /*
1785  * vm_pageout is the high level pageout daemon.
1786  * vm_pageout_thread() is the high level pageout daemon.
1787  * No requirements.
1788  */
1789 static void
1790 vm_pageout_thread(void)
1791 {
1792         int pass;
1793         int q;
1794         int q1iterator = 0;
1795         int q2iterator = 0;
1796
1797         /*
1798          * Initialize some paging parameters.
1799          */
1800         curthread->td_flags |= TDF_SYSTHREAD;
1801
1802         vm_pageout_free_page_calc(vmstats.v_page_count);
1803
1804         /*
1805          * v_free_target and v_cache_min control pageout hysteresis.  Note
1806          * that these are more a measure of the VM cache queue hysteresis
1807          * then the VM free queue.  Specifically, v_free_target is the
1808          * than the VM free queue.  Specifically, v_free_target is the
1809          *
1810          * v_free_reserved + v_cache_min (mostly means v_cache_min) is the
1811          * low water mark, while v_free_min is the stop.  v_cache_min must
1812          * be big enough to handle memory needs while the pageout daemon
1813          * is signalled and run to free more pages.
1814          */
1815         if (vmstats.v_free_count > 6144)
1816                 vmstats.v_free_target = 4 * vmstats.v_free_min + vmstats.v_free_reserved;
1817         else
1818                 vmstats.v_free_target = 2 * vmstats.v_free_min + vmstats.v_free_reserved;
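        /*
         * Illustrative only: continuing the example from
         * vm_pageout_free_page_calc(), v_free_min ~5300 and
         * v_free_reserved ~2657 give a large-memory v_free_target of
         * roughly 4 * 5300 + 2657 = ~23900 pages (~93MB of 4K pages).
         */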
1819
1820         /*
1821          * NOTE: With the new buffer cache b_act_count we want the default
1822          *       inactive target to be a percentage of available memory.
1823          *
1824          *       The inactive target essentially determines the minimum
1825          *       number of 'temporary' pages capable of caching one-time-use
1826          *       files when the VM system is otherwise full of pages
1827          *       belonging to multi-time-use files or active program data.
1828          *
1829          * NOTE: The inactive target is aggressively pursued only if the
1830          *       inactive queue becomes too small.  If the inactive queue
1831          *       is large enough to satisfy page movement to free+cache
1832          *       then it is repopulated more slowly from the active queue.
1833          *       This allows a general inactive_target default to be set.
1834          *
1835          *       There is an issue here for processes which sit mostly idle
1836          *       'overnight', such as sshd, tcsh, and X.  Any movement from
1837          *       the active queue will eventually cause such pages to be
1838          *       recycled, causing a lot of paging in the morning.
1839          *       To reduce the incidence of this, pages cycled out of the
1840          *       buffer cache are moved directly to the inactive queue if
1841          *       they were only used once or twice.
1842          *
1843          *       The vfs.vm_cycle_point sysctl can be used to adjust this.
1844          *       Increasing the value (up to 64) increases the number of
1845          *       buffer recyclements which go directly to the inactive queue.
1846          */
1847         if (vmstats.v_free_count > 2048) {
1848                 vmstats.v_cache_min = vmstats.v_free_target;
1849                 vmstats.v_cache_max = 2 * vmstats.v_cache_min;
1850         } else {
1851                 vmstats.v_cache_min = 0;
1852                 vmstats.v_cache_max = 0;
1853         }
1854         vmstats.v_inactive_target = vmstats.v_free_count / 4;
1855
1856         /* XXX does not really belong here */
1857         if (vm_page_max_wired == 0)
1858                 vm_page_max_wired = vmstats.v_free_count / 3;
1859
1860         if (vm_pageout_stats_max == 0)
1861                 vm_pageout_stats_max = vmstats.v_free_target;
1862
1863         /*
1864          * Set interval in seconds for stats scan.
1865          */
1866         if (vm_pageout_stats_interval == 0)
1867                 vm_pageout_stats_interval = 5;
1868         if (vm_pageout_full_stats_interval == 0)
1869                 vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;
1870
1871
1872         /*
1873          * Set maximum free per pass
1874          */
1875         if (vm_pageout_stats_free_max == 0)
1876                 vm_pageout_stats_free_max = 5;
1877
1878         swap_pager_swap_init();
1879         pass = 0;
1880
1881         /*
1882          * The pageout daemon is never done, so loop forever.
1883          */
1884         while (TRUE) {
1885                 int error;
1886                 int avail_shortage;
1887                 int inactive_shortage;
1888                 int vnodes_skipped = 0;
1889                 int recycle_count = 0;
1890                 int tmp;
1891
1892                 /*
1893                  * Wait for an action request.  If we time out, check to
1894                  * see if paging is needed (in case the normal wakeup
1895                  * code raced us).
1896                  */
1897                 if (vm_pages_needed == 0) {
1898                         error = tsleep(&vm_pages_needed,
1899                                        0, "psleep",
1900                                        vm_pageout_stats_interval * hz);
1901                         if (error &&
1902                             vm_paging_needed() == 0 &&
1903                             vm_pages_needed == 0) {
1904                                 for (q = 0; q < PQ_L2_SIZE; ++q)
1905                                         vm_pageout_page_stats(q);
1906                                 continue;
1907                         }
1908                         vm_pages_needed = 1;
1909                 }
1910
1911                 mycpu->gd_cnt.v_pdwakeups++;
1912
1913                 /*
1914                  * Do whatever cleanup that the pmap code can.
1915                  */
1916                 pmap_collect();
1917
1918                 /*
1919                  * Scan for pageout.  Try to avoid thrashing the system
1920                  * with activity.
1921                  *
1922                  * Calculate our target for the number of free+cache pages we
1923                  * want to get to.  This is higher than the number that causes
1924                  * allocations to stall (severe) in order to provide hysteresis,
1925                  * and if we don't make it all the way but get to the minimum
1926                  * we're happy.  Goose it a bit if there are multiple requests
1927                  * for memory.
1928                  *
1929                  * Don't reduce avail_shortage inside the loop or the
1930                  * PQAVERAGE() calculation will break.
1931                  */
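                /*
                 * Rough sketch of the split: PQAVERAGE() hands each of
                 * the PQ_L2_SIZE inactive queues an equal share of the
                 * shortage, so e.g. a 4096 page shortage over 256 queues
                 * works out to roughly 16 pages per queue (numbers
                 * illustrative; PQ_L2_SIZE is configuration dependent).
                 */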
1932                 avail_shortage = vm_paging_target() + vm_pageout_deficit;
1933                 vm_pageout_deficit = 0;
1934
1935                 if (avail_shortage > 0) {
1936                         int delta = 0;
1937
1938                         for (q = 0; q < PQ_L2_SIZE; ++q) {
1939                                 delta += vm_pageout_scan_inactive(
1940                                             pass,
1941                                             (q + q1iterator) & PQ_L2_MASK,
1942                                             PQAVERAGE(avail_shortage),
1943                                             &vnodes_skipped);
1944                                 if (avail_shortage - delta <= 0)
1945                                         break;
1946                         }
1947                         avail_shortage -= delta;
1948                         q1iterator = q + 1;
1949                 }
1950
1951                 /*
1952                  * Figure out how many active pages we must deactivate.  If
1953                  * we were able to reach our target with just the inactive
1954                  * scan above we limit the number of active pages we
1955                  * deactivate to reduce unnecessary work.
1956                  */
1957                 inactive_shortage = vmstats.v_inactive_target -
1958                                     vmstats.v_inactive_count;
1959
1960                 /*
1961                  * If we were unable to free sufficient inactive pages to
1962                  * satisfy the free/cache queue requirements then simply
1963                  * reaching the inactive target may not be good enough.
1964                  * Try to deactivate pages in excess of the target based
1965                  * on the shortfall.
1966                  *
1967                  * However to prevent thrashing the VM system do not
1968                  * deactivate more than an additional 1/10 the inactive
1969                  * target's worth of active pages.
1970                  */
1971                 if (avail_shortage > 0) {
1972                         tmp = avail_shortage * 2;
1973                         if (tmp > vmstats.v_inactive_target / 10)
1974                                 tmp = vmstats.v_inactive_target / 10;
1975                         inactive_shortage += tmp;
1976                 }
1977
1978                 /*
1979                  * Only trigger on inactive shortage.  Triggering on
1980                  * avail_shortage can starve the active queue with
1981                  * unnecessary active->inactive transitions and destroy
1982                  * performance.
1983                  */
1984                 if (/*avail_shortage > 0 ||*/ inactive_shortage > 0) {
1985                         int delta = 0;
1986
1987                         for (q = 0; q < PQ_L2_SIZE; ++q) {
1988                                 delta += vm_pageout_scan_active(
1989                                                 pass,
1990                                                 (q + q2iterator) & PQ_L2_MASK,
1991                                                 PQAVERAGE(avail_shortage),
1992                                                 PQAVERAGE(inactive_shortage),
1993                                                 &recycle_count);
1994                                 if (inactive_shortage - delta <= 0 &&
1995                                     avail_shortage - delta <= 0) {
1996                                         break;
1997                                 }
1998                         }
1999                         inactive_shortage -= delta;
2000                         avail_shortage -= delta;
2001                         q2iterator = q + 1;
2002                 }
2003
2004                 /*
2005                  * Finally free enough cache pages to meet our free page
2006                  * requirement and take more drastic measures if we are
2007                  * still in trouble.
2008                  */
2009                 vm_pageout_scan_cache(avail_shortage, pass,
2010                                       vnodes_skipped, recycle_count);
2011
2012                 /*
2013                  * Wait for more work.
2014                  */
2015                 if (avail_shortage > 0) {
2016                         ++pass;
2017                         if (pass < 10 && vm_pages_needed > 1) {
2018                                 /*
2019                                  * Normal operation, additional processes
2020                                  * have already kicked us.  Retry immediately
2021                                  * unless swap space is completely full in
2022                                  * which case delay a bit.
2023                                  */
2024                                 if (swap_pager_full) {
2025                                         tsleep(&vm_pages_needed, 0, "pdelay",
2026                                                 hz / 5);
2027                                 } /* else immediate retry */
2028                         } else if (pass < 10) {
2029                                 /*
2030                                  * Normal operation, fewer processes.  Delay
2031                                  * a bit but allow wakeups.
2032                                  */
2033                                 vm_pages_needed = 0;
2034                                 tsleep(&vm_pages_needed, 0, "pdelay", hz / 10);
2035                                 vm_pages_needed = 1;
2036                         } else if (swap_pager_full == 0) {
2037                                 /*
2038                                  * We've taken too many passes, forced delay.
2039                                  */
2040                                 tsleep(&vm_pages_needed, 0, "pdelay", hz / 10);
2041                         } else {
2042                                 /*
2043                                  * Running out of memory, catastrophic
2044                                  * back-off to one-second intervals.
2045                                  */
2046                                 tsleep(&vm_pages_needed, 0, "pdelay", hz);
2047                         }
2048                 } else if (vm_pages_needed) {
2049                         /*
2050                          * Interlocked wakeup of waiters (non-optional).
2051                          *
2052                          * Similar to vm_page_free_wakeup() in vm_page.c,
2053                          * wake up anyone sleeping on v_free_count once counts recover.
2054                          */
2055                         pass = 0;
2056                         if (!vm_page_count_min(vm_page_free_hysteresis) ||
2057                             !vm_page_count_target()) {
2058                                 vm_pages_needed = 0;
2059                                 wakeup(&vmstats.v_free_count);
2060                         }
2061                 } else {
2062                         pass = 0;
2063                 }
2064         }
2065 }
2066
2067 static struct kproc_desc page_kp = {
2068         "pagedaemon",
2069         vm_pageout_thread,
2070         &pagethread
2071 };
2072 SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp);
2073
2074
2075 /*
2076  * Called after allocating a page out of the cache or free queue
2077  * to possibly wake the pagedaemon up to replenish our supply.
2078  *
2079  * We try to generate some hysteresis by waking the pagedaemon up
2080  * when our free+cache pages go below the free_min+cache_min level.
2081  * The pagedaemon tries to get the count back up to at least the
2082  * minimum, and through to the target level if possible.
2083  *
2084  * If the pagedaemon is already active bump vm_pages_needed as a hint
2085  * that there are even more requests pending.
2086  *
2087  * SMP races ok?
2088  * No requirements.
2089  */
2090 void
2091 pagedaemon_wakeup(void)
2092 {
2093         if (vm_paging_needed() && curthread != pagethread) {
2094                 if (vm_pages_needed == 0) {
2095                         vm_pages_needed = 1;    /* SMP race ok */
2096                         wakeup(&vm_pages_needed);
2097                 } else if (vm_page_count_min(0)) {
2098                         ++vm_pages_needed;      /* SMP race ok */
2099                 }
2100         }
2101 }
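/*
 * Typical call pattern (sketch only, not a quote of the actual
 * vm_page.c code): an allocator that pulls a page off the free or
 * cache queue calls pagedaemon_wakeup() afterwards so the daemon can
 * start replenishing the queues before allocations begin to stall.
 */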
2102
2103 #if !defined(NO_SWAPPING)
2104
2105 /*
2106  * SMP races ok?
2107  * No requirements.
2108  */
2109 static void
2110 vm_req_vmdaemon(void)
2111 {
2112         static int lastrun = 0;
2113
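        /*
         * Rate-limit wakeups to roughly one per second; e.g. with
         * hz = 100 at least 100 ticks must elapse since the last run.
         * The (ticks < lastrun) test re-arms the check if the tick
         * counter wraps around.
         */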
2114         if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
2115                 wakeup(&vm_daemon_needed);
2116                 lastrun = ticks;
2117         }
2118 }
2119
2120 static int vm_daemon_callback(struct proc *p, void *data __unused);
2121
2122 /*
2123  * No requirements.
2124  */
2125 static void
2126 vm_daemon(void)
2127 {
2128         /*
2129          * XXX vm_daemon_needed specific token?
2130          */
2131         while (TRUE) {
2132                 tsleep(&vm_daemon_needed, 0, "psleep", 0);
2133                 if (vm_pageout_req_swapout) {
2134                         swapout_procs(vm_pageout_req_swapout);
2135                         vm_pageout_req_swapout = 0;
2136                 }
2137                 /*
2138                  * Scan for processes exceeding their rlimits or that are
2139                  * swapped out, and deactivate their pages.
2140                  */
2141                 allproc_scan(vm_daemon_callback, NULL);
2142         }
2143 }
2144
2145 static int
2146 vm_daemon_callback(struct proc *p, void *data __unused)
2147 {
2148         struct vmspace *vm;
2149         vm_pindex_t limit, size;
2150
2151         /*
2152          * if this is a system process or if we have already
2153          * looked at this process, skip it.
2154          */
2155         lwkt_gettoken(&p->p_token);
2156
2157         if (p->p_flags & (P_SYSTEM | P_WEXIT)) {
2158                 lwkt_reltoken(&p->p_token);
2159                 return (0);
2160         }
2161
2162         /*
2163          * if the process is in a non-running type state,
2164          * don't touch it.
2165          */
2166         if (p->p_stat != SACTIVE && p->p_stat != SSTOP && p->p_stat != SCORE) {
2167                 lwkt_reltoken(&p->p_token);
2168                 return (0);
2169         }
2170
2171         /*
2172          * get a limit
2173          */
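        /*
         * OFF_TO_IDX() turns the byte limit into a page count, e.g. a
         * 64MB RSS limit corresponds to 16384 4K pages (illustrative).
         */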
2174         limit = OFF_TO_IDX(qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
2175                                 p->p_rlimit[RLIMIT_RSS].rlim_max));
2176
2177         /*
2178          * let processes that are swapped out really be
2179          * swapped out.  Set the limit to nothing to get as
2180          * many pages out to swap as possible.
2181          */
2182         if (p->p_flags & P_SWAPPEDOUT)
2183                 limit = 0;
2184
2185         vm = p->p_vmspace;
2186         vmspace_hold(vm);
2187         size = vmspace_resident_count(vm);
2188         if (limit >= 0 && size >= limit) {
2189                 vm_pageout_map_deactivate_pages(&vm->vm_map, limit);
2190         }
2191         vmspace_drop(vm);
2192
2193         lwkt_reltoken(&p->p_token);
2194
2195         return (0);
2196 }
2197
2198 #endif