1 /*
2  * Copyright (c) 1991 Regents of the University of California.
3  * All rights reserved.
4  * Copyright (c) 1994 John S. Dyson
5  * All rights reserved.
6  * Copyright (c) 1994 David Greenman
7  * All rights reserved.
8  *
9  * This code is derived from software contributed to Berkeley by
10  * The Mach Operating System project at Carnegie-Mellon University.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 4. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *      from: @(#)vm_pageout.c  7.4 (Berkeley) 5/7/91
37  *
38  *
39  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40  * All rights reserved.
41  *
42  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
43  *
44  * Permission to use, copy, modify and distribute this software and
45  * its documentation is hereby granted, provided that both the copyright
46  * notice and this permission notice appear in all copies of the
47  * software, derivative works or modified versions, and any portions
48  * thereof, and that both notices appear in supporting documentation.
49  *
50  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53  *
54  * Carnegie Mellon requests users of this software to return to
55  *
56  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
57  *  School of Computer Science
58  *  Carnegie Mellon University
59  *  Pittsburgh PA 15213-3890
60  *
61  * any improvements or extensions that they make and grant Carnegie the
62  * rights to redistribute these changes.
63  *
64  * $FreeBSD: src/sys/vm/vm_pageout.c,v 1.151.2.15 2002/12/29 18:21:04 dillon Exp $
65  */
66
67 /*
68  *      The proverbial page-out daemon.
69  */
70
71 #include "opt_vm.h"
72 #include <sys/param.h>
73 #include <sys/systm.h>
74 #include <sys/kernel.h>
75 #include <sys/proc.h>
76 #include <sys/kthread.h>
77 #include <sys/resourcevar.h>
78 #include <sys/signalvar.h>
79 #include <sys/vnode.h>
80 #include <sys/vmmeter.h>
81 #include <sys/sysctl.h>
82
83 #include <vm/vm.h>
84 #include <vm/vm_param.h>
85 #include <sys/lock.h>
86 #include <vm/vm_object.h>
87 #include <vm/vm_page.h>
88 #include <vm/vm_map.h>
89 #include <vm/vm_pageout.h>
90 #include <vm/vm_pager.h>
91 #include <vm/swap_pager.h>
92 #include <vm/vm_extern.h>
93
94 #include <sys/thread2.h>
95 #include <sys/spinlock2.h>
96 #include <vm/vm_page2.h>
97
98 /*
99  * System initialization
100  */
101
102 /* the kernel process "vm_pageout"*/
103 static int vm_pageout_clean (vm_page_t);
104 static int vm_pageout_free_page_calc (vm_size_t count);
105 struct thread *pagethread;
106
107 #if !defined(NO_SWAPPING)
108 /* the kernel process "vm_daemon"*/
109 static void vm_daemon (void);
110 static struct   thread *vmthread;
111
112 static struct kproc_desc vm_kp = {
113         "vmdaemon",
114         vm_daemon,
115         &vmthread
116 };
117 SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
118 #endif
119
120 int vm_pages_needed=0;          /* Event on which pageout daemon sleeps */
121 int vm_pageout_deficit=0;       /* Estimated number of pages in deficit */
122 int vm_pageout_pages_needed=0;  /* flag saying that the pageout daemon needs pages */
123
124 #if !defined(NO_SWAPPING)
125 static int vm_pageout_req_swapout;      /* XXX */
126 static int vm_daemon_needed;
127 #endif
128 static int vm_max_launder = 32;
129 static int vm_pageout_stats_max=0, vm_pageout_stats_interval = 0;
130 static int vm_pageout_full_stats_interval = 0;
131 static int vm_pageout_stats_free_max=0, vm_pageout_algorithm=0;
132 static int defer_swap_pageouts=0;
133 static int disable_swap_pageouts=0;
134
135 #if defined(NO_SWAPPING)
136 static int vm_swap_enabled=0;
137 static int vm_swap_idle_enabled=0;
138 #else
139 static int vm_swap_enabled=1;
140 static int vm_swap_idle_enabled=0;
141 #endif
142
143 SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
144         CTLFLAG_RW, &vm_pageout_algorithm, 0, "LRU page mgmt");
145
146 SYSCTL_INT(_vm, OID_AUTO, max_launder,
147         CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");
148
149 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
150         CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");
151
152 SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
153         CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");
154
155 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
156         CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");
157
158 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
159         CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "Not implemented");
160
161 #if defined(NO_SWAPPING)
162 SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
163         CTLFLAG_RD, &vm_swap_enabled, 0, "");
164 SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
165         CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
166 #else
167 SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
168         CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
169 SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
170         CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
171 #endif
172
173 SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
174         CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");
175
176 SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
177         CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");
178
179 static int pageout_lock_miss;
180 SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
181         CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");
182
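/*
 * vm_pageout_page_count bounds the size of the pageout cluster built by
 * vm_pageout_clean().  The staging array there is dimensioned at
 * 2 * vm_pageout_page_count so the cluster can be grown in both
 * directions around the target page before being flushed.
 */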
183 #define VM_PAGEOUT_PAGE_COUNT 16
184 int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;
185
186 int vm_page_max_wired;          /* XXX max # of wired pages system-wide */
187
188 #if !defined(NO_SWAPPING)
189 typedef void freeer_fcn_t (vm_map_t, vm_object_t, vm_pindex_t, int);
190 static void vm_pageout_map_deactivate_pages (vm_map_t, vm_pindex_t);
191 static freeer_fcn_t vm_pageout_object_deactivate_pages;
192 static void vm_req_vmdaemon (void);
193 #endif
194 static void vm_pageout_page_stats(int q);
195
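/*
 * Apportion a global page count across the PQ_L2_SIZE page queues.
 * The division rounds away from zero and is biased by one so that even
 * a small overall shortage (or surplus) still maps to at least one page
 * of work per queue.
 */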
196 static __inline int
197 PQAVERAGE(int n)
198 {
199         if (n >= 0)
200                 return((n + (PQ_L2_SIZE - 1)) / PQ_L2_SIZE + 1);
201         else
202                 return((n - (PQ_L2_SIZE - 1)) / PQ_L2_SIZE - 1);
203 }
204
205 /*
206  * vm_pageout_clean:
207  *
208  * Clean the page and remove it from the laundry.  The page is busied
209  * on call and the busy is consumed by this routine.
210  *
211  * We set the busy bit on clustered pages to cause potential page faults
212  * on them to block.  Note the careful timing, however: the busy bit isn't
213  * set until late and we cannot do anything that will mess with the page.
214  */
215 static int
216 vm_pageout_clean(vm_page_t m)
217 {
218         vm_object_t object;
219         vm_page_t mc[2*vm_pageout_page_count];
220         int pageout_count;
221         int error;
222         int ib, is, page_base;
223         vm_pindex_t pindex = m->pindex;
224
225         object = m->object;
226
227         /*
228          * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
229          * with the new swapper, but we could have serious problems paging
230          * out other object types if there is insufficient memory.  
231          *
232          * Unfortunately, checking free memory here is far too late, so the
233          * check has been moved up a procedural level.
234          */
235
236         /*
237          * Don't mess with the page if it's busy, held, or special
238          *
239          * XXX do we really need to check hold_count here?  hold_count
240          * isn't supposed to mess with vm_page ops except prevent the
241          * page from being reused.
242          */
243         if (m->hold_count != 0 || (m->flags & PG_UNMANAGED)) {
244                 vm_page_wakeup(m);
245                 return 0;
246         }
247
248         mc[vm_pageout_page_count] = m;
249         pageout_count = 1;
250         page_base = vm_pageout_page_count;
251         ib = 1;
252         is = 1;
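        /*
         * The cluster is staged in the middle of mc[]: the reverse scan
         * below fills slots downward from page_base while the forward
         * scan fills upward, so the completed cluster is flushed from
         * &mc[page_base] in ascending pindex order.
         */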
253
254         /*
255          * Scan object for clusterable pages.
256          *
257          * We can cluster ONLY if the page is NOT
258          * clean, wired, busy, held, or mapped into a
259          * buffer, and one of the following holds:
260          * 1) The page is inactive, or a seldom-used
261          *    active page.
262          * -or-
263          * 2) We force the issue.
264          *
265          * During heavy mmap/modification loads the pageout
266          * daemon can really fragment the underlying file
267          * due to flushing pages out of order and not trying to
268          * align the clusters (which leaves sporadic out-of-order
269          * holes).  To solve this problem we do the reverse scan
270          * first and attempt to align our cluster, then do a
271          * forward scan if room remains.
272          */
273
274         vm_object_hold(object);
275 more:
276         while (ib && pageout_count < vm_pageout_page_count) {
277                 vm_page_t p;
278
279                 if (ib > pindex) {
280                         ib = 0;
281                         break;
282                 }
283
284                 p = vm_page_lookup_busy_try(object, pindex - ib, TRUE, &error);
285                 if (error || p == NULL) {
286                         ib = 0;
287                         break;
288                 }
289                 if ((p->queue - p->pc) == PQ_CACHE ||
290                     (p->flags & PG_UNMANAGED)) {
291                         vm_page_wakeup(p);
292                         ib = 0;
293                         break;
294                 }
295                 vm_page_test_dirty(p);
296                 if (((p->dirty & p->valid) == 0 &&
297                      (p->flags & PG_NEED_COMMIT) == 0) ||
298                     p->queue - p->pc != PQ_INACTIVE ||
299                     p->wire_count != 0 ||       /* may be held by buf cache */
300                     p->hold_count != 0) {       /* may be undergoing I/O */
301                         vm_page_wakeup(p);
302                         ib = 0;
303                         break;
304                 }
305                 mc[--page_base] = p;
306                 ++pageout_count;
307                 ++ib;
308                 /*
309                  * alignment boundary, stop here and switch directions.  Do
310                  * not clear ib.
311                  */
312                 if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
313                         break;
314         }
315
316         while (pageout_count < vm_pageout_page_count && 
317             pindex + is < object->size) {
318                 vm_page_t p;
319
320                 p = vm_page_lookup_busy_try(object, pindex + is, TRUE, &error);
321                 if (error || p == NULL)
322                         break;
323                 if (((p->queue - p->pc) == PQ_CACHE) ||
324                     (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
325                         vm_page_wakeup(p);
326                         break;
327                 }
328                 vm_page_test_dirty(p);
329                 if (((p->dirty & p->valid) == 0 &&
330                      (p->flags & PG_NEED_COMMIT) == 0) ||
331                     p->queue - p->pc != PQ_INACTIVE ||
332                     p->wire_count != 0 ||       /* may be held by buf cache */
333                     p->hold_count != 0) {       /* may be undergoing I/O */
334                         vm_page_wakeup(p);
335                         break;
336                 }
337                 mc[page_base + pageout_count] = p;
338                 ++pageout_count;
339                 ++is;
340         }
341
342         /*
343          * If we exhausted our forward scan, continue with the reverse scan
344          * when possible, even past a page boundary.  This catches boundary
345          * conditions.
346          */
347         if (ib && pageout_count < vm_pageout_page_count)
348                 goto more;
349
350         vm_object_drop(object);
351
352         /*
353          * we allow reads during pageouts...
354          */
355         return vm_pageout_flush(&mc[page_base], pageout_count, 0);
356 }
357
358 /*
359  * vm_pageout_flush() - launder the given pages
360  *
361  *      The given pages are laundered.  Note that we set up for the start of
362  *      I/O (i.e. busy the page), mark it read-only, and bump the object
363  *      reference count all in here rather than in the parent.  If we want
364  *      the parent to do more sophisticated things we may have to change
365  *      the ordering.
366  *
367  *      The pages in the array must be busied by the caller and will be
368  *      unbusied by this function.
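 *
 *      Returns the number of pages whose pageout was successfully
 *      initiated (pager status VM_PAGER_OK or VM_PAGER_PEND).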
369  */
370 int
371 vm_pageout_flush(vm_page_t *mc, int count, int flags)
372 {
373         vm_object_t object;
374         int pageout_status[count];
375         int numpagedout = 0;
376         int i;
377
378         /*
379          * Initiate I/O.  Bump the vm_page_t->busy counter.
380          */
381         for (i = 0; i < count; i++) {
382                 KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
383                         ("vm_pageout_flush page %p index %d/%d: partially "
384                          "invalid page", mc[i], i, count));
385                 vm_page_io_start(mc[i]);
386         }
387
388         /*
389          * We must make the pages read-only.  This will also force the
390          * modified bit in the related pmaps to be cleared.  The pager
391          * cannot clear the bit for us since the I/O completion code
392          * typically runs from an interrupt.  The act of making the page
393          * read-only handles the case for us.
394          *
395          * Then we can unbusy the pages, we still hold a reference by virtue
396          * of our soft-busy.
397          */
398         for (i = 0; i < count; i++) {
399                 vm_page_protect(mc[i], VM_PROT_READ);
400                 vm_page_wakeup(mc[i]);
401         }
402
403         object = mc[0]->object;
404         vm_object_pip_add(object, count);
405
406         vm_pager_put_pages(object, mc, count,
407             (flags | ((object == &kernel_object) ? VM_PAGER_PUT_SYNC : 0)),
408             pageout_status);
409
410         for (i = 0; i < count; i++) {
411                 vm_page_t mt = mc[i];
412
413                 switch (pageout_status[i]) {
414                 case VM_PAGER_OK:
415                         numpagedout++;
416                         break;
417                 case VM_PAGER_PEND:
418                         numpagedout++;
419                         break;
420                 case VM_PAGER_BAD:
421                         /*
422                          * Page outside of range of object. Right now we
423                          * essentially lose the changes by pretending it
424                          * worked.
425                          */
426                         vm_page_busy_wait(mt, FALSE, "pgbad");
427                         pmap_clear_modify(mt);
428                         vm_page_undirty(mt);
429                         vm_page_wakeup(mt);
430                         break;
431                 case VM_PAGER_ERROR:
432                 case VM_PAGER_FAIL:
433                         /*
434                          * A page typically cannot be paged out when we
435                          * have run out of swap.  We leave the page
436                          * marked inactive and will try to page it out
437                          * again later.
438                          *
439                          * Starvation of the active page list is used to
440                          * determine when the system is massively memory
441                          * starved.
442                          */
443                         break;
444                 case VM_PAGER_AGAIN:
445                         break;
446                 }
447
448                 /*
449                  * If the operation is still going, leave the page busy to
450                  * block all other accesses. Also, leave the paging in
451                  * progress indicator set so that we don't attempt an object
452                  * collapse.
453                  *
454                  * For any page which has completed synchronously,
455                  * deactivate the page if we are under a severe deficit.
456                  * Do not try to enter it into the cache, though; it
457                  * might still be read-heavy.
458                  */
459                 if (pageout_status[i] != VM_PAGER_PEND) {
460                         vm_page_busy_wait(mt, FALSE, "pgouw");
461                         if (vm_page_count_severe())
462                                 vm_page_deactivate(mt);
463 #if 0
464                         if (!vm_page_count_severe() || !vm_page_try_to_cache(mt))
465                                 vm_page_protect(mt, VM_PROT_READ);
466 #endif
467                         vm_page_io_finish(mt);
468                         vm_page_wakeup(mt);
469                         vm_object_pip_wakeup(object);
470                 }
471         }
472         return numpagedout;
473 }
474
475 #if !defined(NO_SWAPPING)
476 /*
477  * Deactivate enough pages to satisfy the inactive target
478  * requirements or, if vm_page_proc_limit is set, then
479  * deactivate all of the pages in the object and its
480  * backing_objects.
481  *
482  * The map must be locked.
483  * The caller must hold the vm_object.
484  */
485 static int vm_pageout_object_deactivate_pages_callback(vm_page_t, void *);
486
487 static void
488 vm_pageout_object_deactivate_pages(vm_map_t map, vm_object_t object,
489                                    vm_pindex_t desired, int map_remove_only)
490 {
491         struct rb_vm_page_scan_info info;
492         vm_object_t lobject;
493         vm_object_t tobject;
494         int remove_mode;
495
496         ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
497         lobject = object;
498
499         while (lobject) {
500                 if (pmap_resident_count(vm_map_pmap(map)) <= desired)
501                         break;
502                 if (lobject->type == OBJT_DEVICE || lobject->type == OBJT_PHYS)
503                         break;
504                 if (lobject->paging_in_progress)
505                         break;
506
507                 remove_mode = map_remove_only;
508                 if (lobject->shadow_count > 1)
509                         remove_mode = 1;
510
511                 /*
512                  * scan the objects entire memory queue.  We hold the
513                  * object's token so the scan should not race anything.
514                  */
515                 info.limit = remove_mode;
516                 info.map = map;
517                 info.desired = desired;
518                 vm_page_rb_tree_RB_SCAN(&lobject->rb_memq, NULL,
519                                 vm_pageout_object_deactivate_pages_callback,
520                                 &info
521                 );
522                 while ((tobject = lobject->backing_object) != NULL) {
523                         KKASSERT(tobject != object);
524                         vm_object_hold(tobject);
525                         if (tobject == lobject->backing_object)
526                                 break;
527                         vm_object_drop(tobject);
528                 }
529                 if (lobject != object) {
530                         vm_object_lock_swap();
531                         vm_object_drop(lobject);
532                 }
533                 lobject = tobject;
534         }
535         if (lobject != object)
536                 vm_object_drop(lobject);
537 }
538
539 /*
540  * The caller must hold the vm_object.
541  */
542 static int
543 vm_pageout_object_deactivate_pages_callback(vm_page_t p, void *data)
544 {
545         struct rb_vm_page_scan_info *info = data;
546         int actcount;
547
548         if (pmap_resident_count(vm_map_pmap(info->map)) <= info->desired) {
549                 return(-1);
550         }
551         mycpu->gd_cnt.v_pdpages++;
552
553         if (vm_page_busy_try(p, TRUE))
554                 return(0);
555         if (p->wire_count || p->hold_count || (p->flags & PG_UNMANAGED)) {
556                 vm_page_wakeup(p);
557                 return(0);
558         }
559         if (!pmap_page_exists_quick(vm_map_pmap(info->map), p)) {
560                 vm_page_wakeup(p);
561                 return(0);
562         }
563
564         actcount = pmap_ts_referenced(p);
565         if (actcount) {
566                 vm_page_flag_set(p, PG_REFERENCED);
567         } else if (p->flags & PG_REFERENCED) {
568                 actcount = 1;
569         }
570
571         vm_page_and_queue_spin_lock(p);
572         if (p->queue - p->pc != PQ_ACTIVE && (p->flags & PG_REFERENCED)) {
573                 vm_page_and_queue_spin_unlock(p);
574                 vm_page_activate(p);
575                 p->act_count += actcount;
576                 vm_page_flag_clear(p, PG_REFERENCED);
577         } else if (p->queue - p->pc == PQ_ACTIVE) {
578                 if ((p->flags & PG_REFERENCED) == 0) {
579                         p->act_count -= min(p->act_count, ACT_DECLINE);
580                         if (!info->limit &&
581                             (vm_pageout_algorithm || (p->act_count == 0))) {
582                                 vm_page_and_queue_spin_unlock(p);
583                                 vm_page_protect(p, VM_PROT_NONE);
584                                 vm_page_deactivate(p);
585                         } else {
586                                 TAILQ_REMOVE(&vm_page_queues[p->queue].pl,
587                                              p, pageq);
588                                 TAILQ_INSERT_TAIL(&vm_page_queues[p->queue].pl,
589                                                   p, pageq);
590                                 vm_page_and_queue_spin_unlock(p);
591                         }
592                 } else {
593                         vm_page_and_queue_spin_unlock(p);
594                         vm_page_activate(p);
595                         vm_page_flag_clear(p, PG_REFERENCED);
596
597                         vm_page_and_queue_spin_lock(p);
598                         if (p->queue - p->pc == PQ_ACTIVE) {
599                                 if (p->act_count < (ACT_MAX - ACT_ADVANCE))
600                                         p->act_count += ACT_ADVANCE;
601                                 TAILQ_REMOVE(&vm_page_queues[p->queue].pl,
602                                              p, pageq);
603                                 TAILQ_INSERT_TAIL(&vm_page_queues[p->queue].pl,
604                                                   p, pageq);
605                         }
606                         vm_page_and_queue_spin_unlock(p);
607                 }
608         } else if (p->queue - p->pc == PQ_INACTIVE) {
609                 vm_page_and_queue_spin_unlock(p);
610                 vm_page_protect(p, VM_PROT_NONE);
611         } else {
612                 vm_page_and_queue_spin_unlock(p);
613         }
614         vm_page_wakeup(p);
615         return(0);
616 }
617
618 /*
619  * Deactivate some number of pages in a map; try to do it fairly, but
620  * that is really hard to do.
621  */
622 static void
623 vm_pageout_map_deactivate_pages(vm_map_t map, vm_pindex_t desired)
624 {
625         vm_map_entry_t tmpe;
626         vm_object_t obj, bigobj;
627         int nothingwired;
628
629         if (lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT)) {
630                 return;
631         }
632
633         bigobj = NULL;
634         nothingwired = TRUE;
635
636         /*
637          * First, search out the biggest object and try to free pages from
638          * that.
639          */
640         tmpe = map->header.next;
641         while (tmpe != &map->header) {
642                 switch(tmpe->maptype) {
643                 case VM_MAPTYPE_NORMAL:
644                 case VM_MAPTYPE_VPAGETABLE:
645                         obj = tmpe->object.vm_object;
646                         if ((obj != NULL) && (obj->shadow_count <= 1) &&
647                                 ((bigobj == NULL) ||
648                                  (bigobj->resident_page_count < obj->resident_page_count))) {
649                                 bigobj = obj;
650                         }
651                         break;
652                 default:
653                         break;
654                 }
655                 if (tmpe->wired_count > 0)
656                         nothingwired = FALSE;
657                 tmpe = tmpe->next;
658         }
659
660         if (bigobj)  {
661                 vm_object_hold(bigobj);
662                 vm_pageout_object_deactivate_pages(map, bigobj, desired, 0);
663                 vm_object_drop(bigobj);
664         }
665
666         /*
667          * Next, hunt around for other pages to deactivate.  We actually
668          * do this search sort of wrong -- .text first is not the best idea.
669          */
670         tmpe = map->header.next;
671         while (tmpe != &map->header) {
672                 if (pmap_resident_count(vm_map_pmap(map)) <= desired)
673                         break;
674                 switch(tmpe->maptype) {
675                 case VM_MAPTYPE_NORMAL:
676                 case VM_MAPTYPE_VPAGETABLE:
677                         obj = tmpe->object.vm_object;
678                         if (obj) {
679                                 vm_object_hold(obj);
680                                 vm_pageout_object_deactivate_pages(map, obj, desired, 0);
681                                 vm_object_drop(obj);
682                         }
683                         break;
684                 default:
685                         break;
686                 }
687                 tmpe = tmpe->next;
688         }
689
690         /*
691          * Remove all mappings if a process is swapped out; this will free page
692          * table pages.
693          */
694         if (desired == 0 && nothingwired)
695                 pmap_remove(vm_map_pmap(map),
696                             VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
697         vm_map_unlock(map);
698 }
699 #endif
700
701 /*
702  * Called when the pageout scan wants to free a page.  We no longer
703  * try to cycle the vm_object here with a reference & dealloc, which can
704  * cause a non-trivial object collapse in a critical path.
705  *
706  * It is unclear why we cycled the ref_count in the past; perhaps it was to
707  * optimize shadow chain collapses, but I don't quite see why it would
708  * be necessary.  An OBJ_DEAD object should terminate any and all vm_pages
709  * synchronously and not have to be kick-started.
710  */
711 static void
712 vm_pageout_page_free(vm_page_t m) 
713 {
714         vm_page_protect(m, VM_PROT_NONE);
715         vm_page_free(m);
716 }
717
718 /*
719  * vm_pageout_scan does the dirty work for the pageout daemon.
720  */
721 struct vm_pageout_scan_info {
722         struct proc *bigproc;
723         vm_offset_t bigsize;
724 };
725
726 static int vm_pageout_scan_callback(struct proc *p, void *data);
727
728 static int
729 vm_pageout_scan_inactive(int pass, int q, int avail_shortage,
730                          int *vnodes_skippedp)
731 {
732         vm_page_t m;
733         struct vm_page marker;
734         struct vnode *vpfailed;         /* warning, allowed to be stale */
735         int maxscan;
736         int delta = 0;
737         vm_object_t object;
738         int actcount;
739         int maxlaunder;
740
741         /*
742          * Start scanning the inactive queue for pages we can move to the
743          * cache or free.  The scan will stop when the target is reached or
744          * we have scanned the entire inactive queue.  Note that m->act_count
745          * is not used to form decisions for the inactive queue, only for the
746          * active queue.
747          *
748          * maxlaunder limits the number of dirty pages we flush per scan.
749          * For most systems a smaller value (16 or 32) is more robust under
750          * extreme memory and disk pressure because any unnecessary writes
751          * to disk can result in extreme performance degradation.  However,
752          * systems with excessive dirty pages (especially when MAP_NOSYNC is
753          * used) will die horribly with limited laundering.  If the pageout
754          * daemon cannot clean enough pages in the first pass, we let it go
755          * all out in succeeding passes.
756          */
757         if ((maxlaunder = vm_max_launder) <= 1)
758                 maxlaunder = 1;
759         if (pass)
760                 maxlaunder = 10000;
761
762         /*
763          * Initialize our marker
764          */
765         bzero(&marker, sizeof(marker));
766         marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
767         marker.queue = PQ_INACTIVE + q;
768         marker.pc = q;
769         marker.wire_count = 1;
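        /*
         * The marker is a dummy page (PG_FICTITIOUS | PG_MARKER) threaded
         * onto the queue so the scan can hold its place while the queue
         * spinlock is dropped; it is re-inserted after each page examined.
         * The non-zero wire_count is presumably just to keep anything from
         * treating the marker as a freeable page.
         */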
770
771         /*
772          * Inactive queue scan.
773          *
774          * NOTE: The vm_page must be spinlocked before the queue to avoid
775          *       deadlocks, so it is easiest to simply iterate the loop
776          *       with the queue unlocked at the top.
777          */
778         vpfailed = NULL;
779
780         vm_page_queues_spin_lock(PQ_INACTIVE + q);
781         TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE + q].pl, &marker, pageq);
782         maxscan = vm_page_queues[PQ_INACTIVE + q].lcnt;
783         vm_page_queues_spin_unlock(PQ_INACTIVE + q);
784
785         while ((m = TAILQ_NEXT(&marker, pageq)) != NULL &&
786                maxscan-- > 0 && avail_shortage - delta > 0)
787         {
788                 vm_page_and_queue_spin_lock(m);
789                 if (m != TAILQ_NEXT(&marker, pageq)) {
790                         vm_page_and_queue_spin_unlock(m);
791                         ++maxscan;
792                         continue;
793                 }
794                 KKASSERT(m->queue - m->pc == PQ_INACTIVE);
795                 TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE + q].pl,
796                              &marker, pageq);
797                 TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE + q].pl, m,
798                                    &marker, pageq);
799                 mycpu->gd_cnt.v_pdpages++;
800
801                 /*
802                  * Skip marker pages
803                  */
804                 if (m->flags & PG_MARKER) {
805                         vm_page_and_queue_spin_unlock(m);
806                         continue;
807                 }
808
809                 /*
810                  * Try to busy the page.  Don't mess with pages which are
811                  * already busy or reorder them in the queue.
812                  */
813                 if (vm_page_busy_try(m, TRUE)) {
814                         vm_page_and_queue_spin_unlock(m);
815                         continue;
816                 }
817                 vm_page_and_queue_spin_unlock(m);
818                 KKASSERT(m->queue - m->pc == PQ_INACTIVE);
819
820                 lwkt_yield();
821
822                 /*
823                  * The page has been successfully busied and is now no
824                  * longer spinlocked.  The queue is no longer spinlocked
825                  * either.
826                  */
827
828                 /*
829                  * It is possible for a page to be busied ad-hoc (e.g. the
830                  * pmap_collect() code) and wired and race against the
831                  * allocation of a new page.  vm_page_alloc() may be forced
832                  * to deactivate the wired page in which case it winds up
833                  * on the inactive queue and must be handled here.  We
834                  * correct the problem simply by unqueuing the page.
835                  */
836                 if (m->wire_count) {
837                         vm_page_unqueue_nowakeup(m);
838                         vm_page_wakeup(m);
839                         kprintf("WARNING: pagedaemon: wired page on "
840                                 "inactive queue %p\n", m);
841                         continue;
842                 }
843
844                 /*
845                  * A held page may be undergoing I/O, so skip it.
846                  */
847                 if (m->hold_count) {
848                         vm_page_and_queue_spin_lock(m);
849                         if (m->queue - m->pc == PQ_INACTIVE) {
850                                 TAILQ_REMOVE(
851                                         &vm_page_queues[PQ_INACTIVE + q].pl,
852                                         m, pageq);
853                                 TAILQ_INSERT_TAIL(
854                                         &vm_page_queues[PQ_INACTIVE + q].pl,
855                                         m, pageq);
856                                 ++vm_swapcache_inactive_heuristic;
857                         }
858                         vm_page_and_queue_spin_unlock(m);
859                         vm_page_wakeup(m);
860                         continue;
861                 }
862
863                 if (m->object == NULL || m->object->ref_count == 0) {
864                         /*
865                          * If the object is not being used, we ignore previous 
866                          * references.
867                          */
868                         vm_page_flag_clear(m, PG_REFERENCED);
869                         pmap_clear_reference(m);
870                         /* fall through to end */
871                 } else if (((m->flags & PG_REFERENCED) == 0) &&
872                             (actcount = pmap_ts_referenced(m))) {
873                         /*
874                          * Otherwise, if the page has been referenced while 
875                          * in the inactive queue, we bump the "activation
876                          * count" upwards, making it less likely that the
877                          * page will be added back to the inactive queue
878                          * (or emulated bits, if any) because the upper level
879                          * VM system does not know anything about existing
880                          * references.
881                          * references.
882                          */
883                         vm_page_activate(m);
884                         m->act_count += (actcount + ACT_ADVANCE);
885                         vm_page_wakeup(m);
886                         continue;
887                 }
888
889                 /*
890                  * (m) is still busied.
891                  *
892                  * If the upper level VM system knows about any page 
893                  * references, we activate the page.  We also set the 
894                  * "activation count" higher than normal so that we are less
895                  * likely to place the page back onto the inactive queue again.
896                  */
897                 if ((m->flags & PG_REFERENCED) != 0) {
898                         vm_page_flag_clear(m, PG_REFERENCED);
899                         actcount = pmap_ts_referenced(m);
900                         vm_page_activate(m);
901                         m->act_count += (actcount + ACT_ADVANCE + 1);
902                         vm_page_wakeup(m);
903                         continue;
904                 }
905
906                 /*
907                  * If the upper level VM system doesn't know anything about 
908                  * the page being dirty, we have to check for it again.  As 
909                  * far as the VM code knows, any partially dirty pages are 
910                  * fully dirty.
911                  *
912                  * Pages marked PG_WRITEABLE may be mapped into the user
913                  * address space of a process running on another cpu.  A
914                  * user process (without holding the MP lock) running on
915                  * another cpu may be able to touch the page while we are
916                  * trying to remove it.  vm_page_cache() will handle this
917                  * case for us.
918                  */
919                 if (m->dirty == 0) {
920                         vm_page_test_dirty(m);
921                 } else {
922                         vm_page_dirty(m);
923                 }
924
925                 if (m->valid == 0 && (m->flags & PG_NEED_COMMIT) == 0) {
926                         /*
927                          * Invalid pages can be easily freed
928                          */
929                         vm_pageout_page_free(m);
930                         mycpu->gd_cnt.v_dfree++;
931                         ++delta;
932                 } else if (m->dirty == 0 && (m->flags & PG_NEED_COMMIT) == 0) {
933                         /*
934                          * Clean pages can be placed onto the cache queue.
935                          * This effectively frees them.
936                          */
937                         vm_page_cache(m);
938                         ++delta;
939                 } else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) {
940                         /*
941                          * Dirty pages need to be paged out, but flushing
942                          * a page is extremely expensive versus freeing
943                          * a clean page.  Rather than artificially limiting
944                          * the number of pages we can flush, we instead give
945                          * dirty pages extra priority on the inactive queue
946                          * by forcing them to be cycled through the queue
947                          * twice before being flushed, after which the 
948                          * (now clean) page will cycle through once more
949                          * before being freed.  This significantly extends
950                          * the thrash point for a heavily loaded machine.
951                          */
952                         vm_page_flag_set(m, PG_WINATCFLS);
953                         vm_page_and_queue_spin_lock(m);
954                         if (m->queue - m->pc == PQ_INACTIVE) {
955                                 TAILQ_REMOVE(
956                                         &vm_page_queues[PQ_INACTIVE + q].pl,
957                                         m, pageq);
958                                 TAILQ_INSERT_TAIL(
959                                         &vm_page_queues[PQ_INACTIVE + q].pl,
960                                         m, pageq);
961                                 ++vm_swapcache_inactive_heuristic;
962                         }
963                         vm_page_and_queue_spin_unlock(m);
964                         vm_page_wakeup(m);
965                 } else if (maxlaunder > 0) {
966                         /*
967                          * We always want to try to flush some dirty pages if
968                          * we encounter them, to keep the system stable.
969                          * Normally this number is small, but under extreme
970                          * pressure where there are insufficient clean pages
971                          * on the inactive queue, we may have to go all out.
972                          */
973                         int swap_pageouts_ok;
974                         struct vnode *vp = NULL;
975
976                         swap_pageouts_ok = 0;
977                         object = m->object;
978                         if (object &&
979                             (object->type != OBJT_SWAP) && 
980                             (object->type != OBJT_DEFAULT)) {
981                                 swap_pageouts_ok = 1;
982                         } else {
983                                 swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
984                                 swap_pageouts_ok |= (!disable_swap_pageouts &&
985                                         defer_swap_pageouts &&
986                                         vm_page_count_min(0));
987                         }
988
989                         /*
990                          * We don't bother paging objects that are "dead".  
991                          * Those objects are in a "rundown" state.
992                          */
993                         if (!swap_pageouts_ok || 
994                             (object == NULL) ||
995                             (object->flags & OBJ_DEAD)) {
996                                 vm_page_and_queue_spin_lock(m);
997                                 if (m->queue - m->pc == PQ_INACTIVE) {
998                                         TAILQ_REMOVE(
999                                             &vm_page_queues[PQ_INACTIVE + q].pl,
1000                                             m, pageq);
1001                                         TAILQ_INSERT_TAIL(
1002                                             &vm_page_queues[PQ_INACTIVE + q].pl,
1003                                             m, pageq);
1004                                         ++vm_swapcache_inactive_heuristic;
1005                                 }
1006                                 vm_page_and_queue_spin_unlock(m);
1007                                 vm_page_wakeup(m);
1008                                 continue;
1009                         }
1010
1011                         /*
1012                          * (m) is still busied.
1013                          *
1014                          * The object is already known NOT to be dead.   It
1015                          * is possible for the vget() to block the whole
1016                          * pageout daemon, but the new low-memory handling
1017                          * code should prevent it.
1018                          *
1019                          * The previous code skipped locked vnodes and, worse,
1020                          * reordered pages in the queue.  This results in
1021                          * completely non-deterministic operation because,
1022                          * quite often, a vm_fault has initiated an I/O and
1023                          * is holding a locked vnode at just the point where
1024                          * the pageout daemon is woken up.
1025                          *
1026                          * We can't wait forever for the vnode lock, we might
1027                          * deadlock due to a vn_read() getting stuck in
1028                          * vm_wait while holding this vnode.  We skip the 
1029                          * vnode if we can't get it in a reasonable amount
1030                          * of time.
1031                          *
1032                          * vpfailed is used to (try to) avoid the case where
1033                          * a large number of pages are associated with a
1034                          * locked vnode, which could cause the pageout daemon
1035                          * to stall for an excessive amount of time.
1036                          */
1037                         if (object->type == OBJT_VNODE) {
1038                                 int flags;
1039
1040                                 vp = object->handle;
1041                                 flags = LK_EXCLUSIVE | LK_NOOBJ;
1042                                 if (vp == vpfailed)
1043                                         flags |= LK_NOWAIT;
1044                                 else
1045                                         flags |= LK_TIMELOCK;
1046                                 vm_page_hold(m);
1047                                 vm_page_wakeup(m);
1048
1049                                 /*
1050                                  * We have unbusied (m) temporarily so we can
1051                                  * acquire the vp lock without deadlocking.
1052                                  * (m) is held to prevent destruction.
1053                                  */
1054                                 if (vget(vp, flags) != 0) {
1055                                         vpfailed = vp;
1056                                         ++pageout_lock_miss;
1057                                         if (object->flags & OBJ_MIGHTBEDIRTY)
1058                                                     ++*vnodes_skippedp;
1059                                         vm_page_unhold(m);
1060                                         continue;
1061                                 }
1062
1063                                 /*
1064                                  * The page might have been moved to another
1065                                  * queue during potential blocking in vget()
1066                                  * above.  The page might have been freed and
1067                                  * reused for another vnode.  The object might
1068                                  * have been reused for another vnode.
1069                                  */
1070                                 if (m->queue - m->pc != PQ_INACTIVE ||
1071                                     m->object != object ||
1072                                     object->handle != vp) {
1073                                         if (object->flags & OBJ_MIGHTBEDIRTY)
1074                                                 ++*vnodes_skippedp;
1075                                         vput(vp);
1076                                         vm_page_unhold(m);
1077                                         continue;
1078                                 }
1079         
1080                                 /*
1081                                  * The page may have been busied during the
1082                                  * blocking in vget().  We don't move the
1083                                  * page back onto the end of the queue; the
1084                                  * statistics are more correct if we don't.
1085                                  */
1086                                 if (vm_page_busy_try(m, TRUE)) {
1087                                         vput(vp);
1088                                         vm_page_unhold(m);
1089                                         continue;
1090                                 }
1091                                 vm_page_unhold(m);
1092
1093                                 /*
1094                                  * (m) is busied again
1095                                  *
1096                                  * We own the busy bit and remove our hold
1097                                  * bit.  If the page is still held it
1098                                  * might be undergoing I/O, so skip it.
1099                                  */
1100                                 if (m->hold_count) {
1101                                         vm_page_and_queue_spin_lock(m);
1102                                         if (m->queue - m->pc == PQ_INACTIVE) {
1103                                                 TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE + q].pl, m, pageq);
1104                                                 TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE + q].pl, m, pageq);
1105                                                 ++vm_swapcache_inactive_heuristic;
1106                                         }
1107                                         vm_page_and_queue_spin_unlock(m);
1108                                         if (object->flags & OBJ_MIGHTBEDIRTY)
1109                                                 ++*vnodes_skippedp;
1110                                         vm_page_wakeup(m);
1111                                         vput(vp);
1112                                         continue;
1113                                 }
1114                                 /* (m) is left busied as we fall through */
1115                         }
1116
1117                         /*
1118                          * page is busy and not held here.
1119                          *
1120                          * If a page is dirty, then it is either being washed
1121                          * (but not yet cleaned) or it is still in the
1122                          * laundry.  If it is still in the laundry, then we
1123                          * start the cleaning operation. 
1124                          *
1125                          * We bump delta on success to account
1126                          * for the (future) cleaned page.  Otherwise we
1127                          * could wind up laundering or cleaning too many
1128                          * pages.
1129                          */
1130                         if (vm_pageout_clean(m) != 0) {
1131                                 ++delta;
1132                                 --maxlaunder;
1133                         }
1134                         /* clean ate busy, page no longer accessible */
1135                         if (vp != NULL)
1136                                 vput(vp);
1137                 } else {
1138                         vm_page_wakeup(m);
1139                 }
1140         }
1141         vm_page_queues_spin_lock(PQ_INACTIVE + q);
1142         TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE + q].pl, &marker, pageq);
1143         vm_page_queues_spin_unlock(PQ_INACTIVE + q);
1144         return (delta);
1145 }
1146
1147 static int
1148 vm_pageout_scan_active(int pass, int q,
1149                        int avail_shortage, int inactive_shortage,
1150                        int *recycle_countp)
1151 {
1152         struct vm_page marker;
1153         vm_page_t m;
1154         int actcount;
1155         int delta = 0;
1156         int maxscan;
1157
1158         /*
1159          * We want to move pages from the active queue to the inactive
1160          * queue to get the inactive queue to the inactive target.  If
1161          * we still have a page shortage from above we try to directly free
1162          * clean pages instead of moving them.
1163          *
1164          * If we do still have a shortage we keep track of the number of
1165          * pages we free or cache (recycle_count) as a measure of thrashing
1166          * between the active and inactive queues.
1167          *
1168          * If we were able to completely satisfy the free+cache targets
1169          * from the inactive pool we limit the number of pages we move
1170          * from the active pool to the inactive pool to 2x the pages we
1171          * had removed from the inactive pool (with a minimum of 1/5 the
1172          * inactive target).  If we were not able to completely satisfy
1173          * the free+cache targets we go for the whole target aggressively.
1174          *
1175          * NOTE: Both variables can end up negative.
1176          * NOTE: We are still in a critical section.
1177          */
1178
1179         bzero(&marker, sizeof(marker));
1180         marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
1181         marker.queue = PQ_ACTIVE + q;
1182         marker.pc = q;
1183         marker.wire_count = 1;
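        /* Same queue-marker technique as in vm_pageout_scan_inactive(). */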
1184
1185         vm_page_queues_spin_lock(PQ_ACTIVE + q);
1186         TAILQ_INSERT_HEAD(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1187         maxscan = vm_page_queues[PQ_ACTIVE + q].lcnt;
1188         vm_page_queues_spin_unlock(PQ_ACTIVE + q);
1189
1190         while ((m = TAILQ_NEXT(&marker, pageq)) != NULL &&
1191                maxscan-- > 0 && (avail_shortage - delta > 0 ||
1192                                 inactive_shortage > 0))
1193         {
1194                 vm_page_and_queue_spin_lock(m);
1195                 if (m != TAILQ_NEXT(&marker, pageq)) {
1196                         vm_page_and_queue_spin_unlock(m);
1197                         ++maxscan;
1198                         continue;
1199                 }
1200                 KKASSERT(m->queue - m->pc == PQ_ACTIVE);
1201                 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl,
1202                              &marker, pageq);
1203                 TAILQ_INSERT_AFTER(&vm_page_queues[PQ_ACTIVE + q].pl, m,
1204                                    &marker, pageq);
1205
1206                 /*
1207                  * Skip marker pages
1208                  */
1209                 if (m->flags & PG_MARKER) {
1210                         vm_page_and_queue_spin_unlock(m);
1211                         continue;
1212                 }
1213
1214                 /*
1215                  * Try to busy the page.  Don't mess with pages which are
1216                  * already busy or reorder them in the queue.
1217                  */
1218                 if (vm_page_busy_try(m, TRUE)) {
1219                         vm_page_and_queue_spin_unlock(m);
1220                         continue;
1221                 }
1222
1223                 /*
1224                  * Don't deactivate pages that are held, even if we can
1225                  * busy them.  (XXX why not?)
1226                  */
1227                 if (m->hold_count != 0) {
1228                         TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl,
1229                                      m, pageq);
1230                         TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE + q].pl,
1231                                           m, pageq);
1232                         vm_page_and_queue_spin_unlock(m);
1233                         vm_page_wakeup(m);
1234                         continue;
1235                 }
1236                 vm_page_and_queue_spin_unlock(m);
1237                 lwkt_yield();
1238
1239                 /*
1240                  * The page has been successfully busied and the page and
1241                  * queue are no longer locked.
1242                  */
1243
1244                 /*
1245                  * The count for pagedaemon pages is done after checking the
1246                  * page for eligibility...
1247                  */
1248                 mycpu->gd_cnt.v_pdpages++;
1249
1250                 /*
1251                  * Check to see "how much" the page has been used and clear
1252                  * the tracking access bits.  If the object has no references
1253                  * don't bother paying the expense.
1254                  */
1255                 actcount = 0;
1256                 if (m->object && m->object->ref_count != 0) {
1257                         if (m->flags & PG_REFERENCED)
1258                                 ++actcount;
1259                         actcount += pmap_ts_referenced(m);
1260                         if (actcount) {
1261                                 m->act_count += ACT_ADVANCE + actcount;
1262                                 if (m->act_count > ACT_MAX)
1263                                         m->act_count = ACT_MAX;
1264                         }
1265                 }
1266                 vm_page_flag_clear(m, PG_REFERENCED);
1267
1268                 /*
1269                  * actcount is only valid if the object ref_count is non-zero.
1270                  * If the page does not have an object, actcount will be zero.
1271                  */
1272                 if (actcount && m->object->ref_count != 0) {
1273                         vm_page_and_queue_spin_lock(m);
1274                         if (m->queue - m->pc == PQ_ACTIVE) {
1275                                 TAILQ_REMOVE(
1276                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1277                                         m, pageq);
1278                                 TAILQ_INSERT_TAIL(
1279                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1280                                         m, pageq);
1281                         }
1282                         vm_page_and_queue_spin_unlock(m);
1283                         vm_page_wakeup(m);
1284                 } else {
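                        /*
                         * The page was not recently referenced (or the object
                         * has no references).  Decay its activity count and
                         * decide below whether to deactivate it or requeue it
                         * at the tail of the active queue.
                         */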
1285                         m->act_count -= min(m->act_count, ACT_DECLINE);
1286                         if (vm_pageout_algorithm ||
1287                             (m->object == NULL) ||
1288                             (m->object && (m->object->ref_count == 0)) ||
1289                             m->act_count < pass + 1
1290                         ) {
1291                                 /*
1292                                  * Deactivate the page.  If we had a
1293                                  * shortage from our inactive scan try to
1294                                  * free (cache) the page instead.
1295                                  *
1296                                  * Don't just blindly cache the page if
1297                                  * we do not have a shortage from the
1298                                  * inactive scan, that could lead to
1299                                  * gigabytes being moved.
1300                                  */
1301                                 --inactive_shortage;
1302                                 if (avail_shortage - delta > 0 ||
1303                                     (m->object && (m->object->ref_count == 0)))
1304                                 {
1305                                         if (avail_shortage - delta > 0)
1306                                                 ++*recycle_countp;
1307                                         vm_page_protect(m, VM_PROT_NONE);
1308                                         if (m->dirty == 0 &&
1309                                             (m->flags & PG_NEED_COMMIT) == 0 &&
1310                                             avail_shortage - delta > 0) {
1311                                                 vm_page_cache(m);
1312                                         } else {
1313                                                 vm_page_deactivate(m);
1314                                                 vm_page_wakeup(m);
1315                                         }
1316                                 } else {
1317                                         vm_page_deactivate(m);
1318                                         vm_page_wakeup(m);
1319                                 }
1320                                 ++delta;
1321                         } else {
1322                                 vm_page_and_queue_spin_lock(m);
1323                                 if (m->queue - m->pc == PQ_ACTIVE) {
1324                                         TAILQ_REMOVE(
1325                                             &vm_page_queues[PQ_ACTIVE + q].pl,
1326                                             m, pageq);
1327                                         TAILQ_INSERT_TAIL(
1328                                             &vm_page_queues[PQ_ACTIVE + q].pl,
1329                                             m, pageq);
1330                                 }
1331                                 vm_page_and_queue_spin_unlock(m);
1332                                 vm_page_wakeup(m);
1333                         }
1334                 }
1335         }
1336
1337         /*
1338          * Clean out our local marker.
1339          */
1340         vm_page_queues_spin_lock(PQ_ACTIVE + q);
1341         TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1342         vm_page_queues_spin_unlock(PQ_ACTIVE + q);
1343
1344         return (delta);
1345 }
1346
1347 /*
1348  * The number of actually free pages can drop down to v_free_reserved,
1349  * we try to build the free count back above v_free_min.  Note that
1350  * vm_paging_needed() also returns TRUE if v_free_count is not at
1351  * least v_free_min, so that is the minimum we must build the free
1352  * count to.
1353  *
1354  * We use a slightly higher target to improve hysteresis,
1355  * ((v_free_target + v_free_min) / 2).  Since v_free_target
1356  * is usually the same as v_cache_min this maintains about
1357  * half the pages in the free queue as are in the cache queue,
1358  * providing pretty good pipelining for pageout operation.
1359  *
1360  * The system operator can manipulate vm.v_cache_min and
1361  * vm.v_free_target to tune the pageout daemon.  Be sure
1362  * to keep vm.v_free_min < vm.v_free_target.
1363  *
1364  * Note that the original paging target is to get at least
1365  * (free_min + cache_min) into (free + cache).  The slightly
1366  * higher target will shift additional pages from cache to free
1367  * without affecting the original paging target in order to
1368  * maintain better hysteresis and not have the free count always
1369  * be dead-on v_free_min.
1370  *
1371  * NOTE: we are still in a critical section.
1372  *
1373  * Pages moved from PQ_CACHE to totally free are not counted in the
1374  * pages_freed counter.
1375  */
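/*
 * Illustrative arithmetic (example values, not tuned defaults): with
 * v_free_min = 3000 and v_free_target = 12000, the loop below keeps
 * pulling pages off PQ_CACHE until v_free_count reaches
 * (3000 + 12000) / 2 = 7500.
 */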
1376 static void
1377 vm_pageout_scan_cache(int avail_shortage, int vnodes_skipped, int recycle_count)
1378 {
1379         struct vm_pageout_scan_info info;
1380         vm_page_t m;
1381
1382         while (vmstats.v_free_count <
1383                (vmstats.v_free_min + vmstats.v_free_target) / 2) {
1384                 /*
1385                  * This steals some code from vm/vm_page.c
1386                  */
1387                 static int cache_rover = 0;
1388
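                /*
                 * cache_rover selects which PQ_CACHE sub-queue to scan;
                 * advancing it by PQ_PRIME2 after each page freed spreads
                 * the work across the sub-queues.
                 */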
1389                 m = vm_page_list_find(PQ_CACHE, cache_rover & PQ_L2_MASK, FALSE);
1390                 if (m == NULL)
1391                         break;
1392                 /* page is returned removed from its queue and spinlocked */
1393                 if (vm_page_busy_try(m, TRUE)) {
1394                         vm_page_deactivate_locked(m);
1395                         vm_page_spin_unlock(m);
1396 #ifdef INVARIANTS
1397                         kprintf("Warning: busy page %p found in cache\n", m);
1398 #endif
1399                         continue;
1400                 }
1401                 vm_page_spin_unlock(m);
1402                 pagedaemon_wakeup();
1403                 lwkt_yield();
1404
1405                 /*
1406                  * The page has been successfully busied; the page and its
1407                  * queue are no longer spinlocked.
1408                  */
1409                 if ((m->flags & (PG_UNMANAGED | PG_NEED_COMMIT)) ||
1410                     m->hold_count ||
1411                     m->wire_count) {
1412                         vm_page_deactivate(m);
1413                         vm_page_wakeup(m);
1414                         continue;
1415                 }
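                /*
                 * Pages on PQ_CACHE are expected to be clean and unmapped,
                 * so the page can be freed outright.
                 */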
1416                 KKASSERT((m->flags & PG_MAPPED) == 0);
1417                 KKASSERT(m->dirty == 0);
1418                 cache_rover += PQ_PRIME2;
1419                 vm_pageout_page_free(m);
1420                 mycpu->gd_cnt.v_dfree++;
1421         }
1422
1423 #if !defined(NO_SWAPPING)
1424         /*
1425          * Idle process swapout -- run once per second.
1426          */
1427         if (vm_swap_idle_enabled) {
1428                 static long lsec;
1429                 if (time_second != lsec) {
1430                         vm_pageout_req_swapout |= VM_SWAP_IDLE;
1431                         vm_req_vmdaemon();
1432                         lsec = time_second;
1433                 }
1434         }
1435 #endif
1436
1437         /*
1438          * If we didn't get enough free pages and we have skipped a vnode
1439          * in a writeable object, wakeup the sync daemon.  Also kick off
1440          * swapout if we are still short of our paging target.
1441          */
1442         if (vm_paging_target() > 0) {
1443                 if (vnodes_skipped && vm_page_count_min(0))
1444                         speedup_syncer();
1445 #if !defined(NO_SWAPPING)
1446                 if (vm_swap_enabled && vm_page_count_target()) {
1447                         vm_req_vmdaemon();
1448                         vm_pageout_req_swapout |= VM_SWAP_NORMAL;
1449                 }
1450 #endif
1451         }
1452
1453         /*
1454          * Handle catastrophic conditions.  Under good conditions we should
1455          * be at the target, well beyond our minimum.  If we could not even
1456          * reach our minimum the system is under heavy stress.
1457          *
1458          * Determine whether we have run out of memory.  This occurs when
1459          * swap_pager_full is TRUE and the only pages left in the page
1460          * queues are dirty.  We will still likely have page shortages.
1461          *
1462          * - swap_pager_full is set if insufficient swap was
1463          *   available to satisfy a requested pageout.
1464          *
1465          * - the inactive queue is bloated (4 x size of active queue),
1466          *   meaning it is unable to get rid of dirty pages.
1467          *
1468          * - vm_page_count_min() without counting pages recycled from the
1469          *   active queue (recycle_count) means we could not recover
1470          *   enough pages to meet bare minimum needs.  This test only
1471          *   works if the inactive queue is bloated.
1472          *
1473          * - due to a positive avail_shortage we shifted the remaining
1474          *   dirty pages from the active queue to the inactive queue
1475          *   trying to find clean ones to free.
1476          */
1477         if (swap_pager_full && vm_page_count_min(recycle_count))
1478                 kprintf("Warning: system low on memory+swap!\n");
1479         if (swap_pager_full && vm_page_count_min(recycle_count) &&
1480             vmstats.v_inactive_count > vmstats.v_active_count * 4 &&
1481             avail_shortage > 0) {
1482                 /*
1483                  * Kill something.
1484                  */
1485                 info.bigproc = NULL;
1486                 info.bigsize = 0;
1487                 allproc_scan(vm_pageout_scan_callback, &info);
1488                 if (info.bigproc != NULL) {
1489                         killproc(info.bigproc, "out of swap space");
1490                         info.bigproc->p_nice = PRIO_MIN;
1491                         info.bigproc->p_usched->resetpriority(
1492                                 FIRST_LWP_IN_PROC(info.bigproc));
1493                         wakeup(&vmstats.v_free_count);
1494                         PRELE(info.bigproc);
1495                 }
1496         }
1497 }
1498
1499 /*
1500  * The caller must hold proc_token.
1501  */
1502 static int
1503 vm_pageout_scan_callback(struct proc *p, void *data)
1504 {
1505         struct vm_pageout_scan_info *info = data;
1506         vm_offset_t size;
1507
1508         /*
1509          * Never kill system processes or init.  If we have configured swap
1510          * then try to avoid killing low-numbered pids.
1511          */
1512         if ((p->p_flags & P_SYSTEM) || (p->p_pid == 1) ||
1513             ((p->p_pid < 48) && (vm_swap_size != 0))) {
1514                 return (0);
1515         }
1516
1517         /*
1518          * if the process is in a non-running type state,
1519          * don't touch it.
1520          */
1521         if (p->p_stat != SACTIVE && p->p_stat != SSTOP)
1522                 return (0);
1523
1524         /*
1525          * Get the approximate process size.  Note that anonymous pages
1526          * with backing swap will be counted twice, but there should not
1527          * be too many such pages due to the stress the VM system is
1528          * under at this point.
1529          */
1530         size = vmspace_anonymous_count(p->p_vmspace) +
1531                 vmspace_swap_count(p->p_vmspace);
1532
1533         /*
1534          * If this process is bigger than the biggest one seen so far,
1535          * remember it.
1536          */
1537         if (info->bigsize < size) {
1538                 if (info->bigproc)
1539                         PRELE(info->bigproc);
1540                 PHOLD(p);
1541                 info->bigproc = p;
1542                 info->bigsize = size;
1543         }
1544         lwkt_yield();
1545         return(0);
1546 }
1547
1548 /*
1549  * This routine tries to maintain the pseudo-LRU ordering of the active
1550  * queue so that some statistics accumulation still occurs during long
1551  * periods when there is no paging.  This helps the situation where
1552  * paging is just starting to occur.
1553  */
1554 static void
1555 vm_pageout_page_stats(int q)
1556 {
1557         static int fullintervalcount = 0;
1558         struct vm_page marker;
1559         vm_page_t m;
1560         int pcount, tpcount;            /* Number of pages to check */
1561         int page_shortage;
1562
1563         page_shortage = (vmstats.v_inactive_target + vmstats.v_cache_max +
1564                          vmstats.v_free_min) -
1565                         (vmstats.v_free_count + vmstats.v_inactive_count +
1566                          vmstats.v_cache_count);
1567
1568         if (page_shortage <= 0)
1569                 return;
1570
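        /*
         * Limit the number of pages inspected per call.  Between full-stats
         * intervals only a fraction of the active queue, scaled by
         * vm_pageout_stats_max, is examined; once the full interval has
         * elapsed the entire queue becomes eligible.
         */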
1571         pcount = vm_page_queues[PQ_ACTIVE + q].lcnt;
1572         fullintervalcount += vm_pageout_stats_interval;
1573         if (fullintervalcount < vm_pageout_full_stats_interval) {
1574                 tpcount = (vm_pageout_stats_max * pcount) /
1575                           vmstats.v_page_count + 1;
1576                 if (pcount > tpcount)
1577                         pcount = tpcount;
1578         } else {
1579                 fullintervalcount = 0;
1580         }
1581
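        /*
         * Set up a dummy marker page to hold our place in the queue so the
         * queue spinlock can be dropped while individual pages are
         * examined.  Other scans skip it via PG_MARKER.
         */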
1582         bzero(&marker, sizeof(marker));
1583         marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
1584         marker.queue = PQ_ACTIVE + q;
1585         marker.pc = q;
1586         marker.wire_count = 1;
1587
1588         vm_page_queues_spin_lock(PQ_ACTIVE + q);
1589         TAILQ_INSERT_HEAD(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1590         vm_page_queues_spin_unlock(PQ_ACTIVE + q);
1591
1592         while ((m = TAILQ_NEXT(&marker, pageq)) != NULL &&
1593                pcount-- > 0)
1594         {
1595                 int actcount;
1596
1597                 vm_page_and_queue_spin_lock(m);
1598                 if (m != TAILQ_NEXT(&marker, pageq)) {
1599                         vm_page_and_queue_spin_unlock(m);
1600                         ++pcount;
1601                         continue;
1602                 }
1603                 KKASSERT(m->queue - m->pc == PQ_ACTIVE);
1604                 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1605                 TAILQ_INSERT_AFTER(&vm_page_queues[PQ_ACTIVE + q].pl, m,
1606                                    &marker, pageq);
1607
1608                 /*
1609                  * Ignore markers
1610                  */
1611                 if (m->flags & PG_MARKER) {
1612                         vm_page_and_queue_spin_unlock(m);
1613                         continue;
1614                 }
1615
1616                 /*
1617                  * Ignore pages we can't busy
1618                  */
1619                 if (vm_page_busy_try(m, TRUE)) {
1620                         vm_page_and_queue_spin_unlock(m);
1621                         continue;
1622                 }
1623                 vm_page_and_queue_spin_unlock(m);
1624                 KKASSERT(m->queue - m->pc == PQ_ACTIVE);
1625
1626                 /*
1627                  * We now have a safely busied page; the page and queue
1628                  * spinlocks have been released.
1629                  *
1630                  * Ignore held pages
1631                  */
1632                 if (m->hold_count) {
1633                         vm_page_wakeup(m);
1634                         continue;
1635                 }
1636
1637                 /*
1638                  * Calculate activity
1639                  */
1640                 actcount = 0;
1641                 if (m->flags & PG_REFERENCED) {
1642                         vm_page_flag_clear(m, PG_REFERENCED);
1643                         actcount += 1;
1644                 }
1645                 actcount += pmap_ts_referenced(m);
1646
1647                 /*
1648                  * Update act_count and move page to end of queue.
1649                  */
1650                 if (actcount) {
1651                         m->act_count += ACT_ADVANCE + actcount;
1652                         if (m->act_count > ACT_MAX)
1653                                 m->act_count = ACT_MAX;
1654                         vm_page_and_queue_spin_lock(m);
1655                         if (m->queue - m->pc == PQ_ACTIVE) {
1656                                 TAILQ_REMOVE(
1657                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1658                                         m, pageq);
1659                                 TAILQ_INSERT_TAIL(
1660                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1661                                         m, pageq);
1662                         }
1663                         vm_page_and_queue_spin_unlock(m);
1664                         vm_page_wakeup(m);
1665                         continue;
1666                 }
1667
1668                 if (m->act_count == 0) {
1669                         /*
1670                          * We turn off page access, so that we have
1671                          * more accurate RSS stats.  We don't do this
1672                          * in the normal page deactivation when the
1673                          * system is loaded VM wise, because the
1674                          * cost of the large number of page protect
1675                          * operations would be higher than the value
1676                          * of doing the operation.
1677                          *
1678                          * We use the marker to save our place so
1679                          * we can release the spin lock.  Both (m)
1680                          * and (next) will be invalid.
1681                          */
1682                         vm_page_protect(m, VM_PROT_NONE);
1683                         vm_page_deactivate(m);
1684                 } else {
1685                         m->act_count -= min(m->act_count, ACT_DECLINE);
1686                         vm_page_and_queue_spin_lock(m);
1687                         if (m->queue - m->pc == PQ_ACTIVE) {
1688                                 TAILQ_REMOVE(
1689                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1690                                         m, pageq);
1691                                 TAILQ_INSERT_TAIL(
1692                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1693                                         m, pageq);
1694                         }
1695                         vm_page_and_queue_spin_unlock(m);
1696                 }
1697                 vm_page_wakeup(m);
1698         }
1699
1700         /*
1701          * Remove our local marker
1702          */
1703         vm_page_queues_spin_lock(PQ_ACTIVE + q);
1704         TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1705         vm_page_queues_spin_unlock(PQ_ACTIVE + q);
1706 }
1707
1708 static int
1709 vm_pageout_free_page_calc(vm_size_t count)
1710 {
1711         if (count < vmstats.v_page_count)
1712                 return 0;
1713         /*
1714          * free_reserved needs to include enough for the largest swap pager
1715          * structures plus enough for any pv_entry structs when paging.
1716          *
1717          * v_free_min           normal allocations
1718          * v_free_reserved      system allocations
1719          * v_pageout_free_min   allocations by pageout daemon
1720          * v_interrupt_free_min low level allocations (e.g swap structures)
1721          */
1722         if (vmstats.v_page_count > 1024)
1723                 vmstats.v_free_min = 64 + (vmstats.v_page_count - 1024) / 200;
1724         else
1725                 vmstats.v_free_min = 64;
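        /*
         * Illustrative example (not a tuned value): with roughly one million
         * pages (4GB of 4K pages) the formula above yields
         * v_free_min = 64 + (1048576 - 1024) / 200, about 5300 pages (~20MB).
         * The reserved/severe/pageout/interrupt thresholds below are fixed
         * fractions of v_free_min.
         */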
1726         vmstats.v_free_reserved = vmstats.v_free_min * 4 / 8 + 7;
1727         vmstats.v_free_severe = vmstats.v_free_min * 4 / 8 + 0;
1728         vmstats.v_pageout_free_min = vmstats.v_free_min * 2 / 8 + 7;
1729         vmstats.v_interrupt_free_min = vmstats.v_free_min * 1 / 8 + 7;
1730
1731         return 1;
1732 }
1733
1734
1735 /*
1736  * vm_pageout is the high level pageout daemon.
1737  *
1738  * No requirements.
1739  */
1740 static void
1741 vm_pageout_thread(void)
1742 {
1743         int pass;
1744         int q;
1745
1746         /*
1747          * Initialize some paging parameters.
1748          */
1749         curthread->td_flags |= TDF_SYSTHREAD;
1750
1751         if (vmstats.v_page_count < 2000)
1752                 vm_pageout_page_count = 8;
1753
1754         vm_pageout_free_page_calc(vmstats.v_page_count);
1755
1756         /*
1757          * v_free_target and v_cache_min control pageout hysteresis.  Note
1758          * that these are more a measure of the VM cache queue hysteresis
1759          *       than the VM free queue.  Specifically, v_free_target is the
1760          * high water mark (free+cache pages).
1761          *
1762          * v_free_reserved + v_cache_min (mostly means v_cache_min) is the
1763          * low water mark, while v_free_min is the stop.  v_cache_min must
1764          * be big enough to handle memory needs while the pageout daemon
1765          * is signalled and run to free more pages.
1766          */
1767         if (vmstats.v_free_count > 6144)
1768                 vmstats.v_free_target = 4 * vmstats.v_free_min + vmstats.v_free_reserved;
1769         else
1770                 vmstats.v_free_target = 2 * vmstats.v_free_min + vmstats.v_free_reserved;
1771
1772         /*
1773          * NOTE: With the new buffer cache b_act_count we want the default
1774          *       inactive target to be a percentage of available memory.
1775          *
1776          *       The inactive target essentially determines the minimum
1777          *       number of 'temporary' pages capable of caching one-time-use
1778          *       files when the VM system is otherwise full of pages
1779          *       belonging to multi-time-use files or active program data.
1780          *
1781          * NOTE: The inactive target is aggressively pursued only if the
1782          *       inactive queue becomes too small.  If the inactive queue
1783          *       is large enough to satisfy page movement to free+cache
1784          *       then it is repopulated more slowly from the active queue.
1785          *       This allows a general inactive_target default to be set.
1786          *
1787          *       There is an issue here for processes which sit mostly idle
1788          *       'overnight', such as sshd, tcsh, and X.  Any movement from
1789          *       the active queue will eventually cause such pages to be
1790          *       recycled, causing a lot of paging in the morning.  To reduce
1791          *       the incidence of this, pages cycled out of the
1792          *       buffer cache are moved directly to the inactive queue if
1793          *       they were only used once or twice.
1794          *
1795          *       The vfs.vm_cycle_point sysctl can be used to adjust this.
1796          *       Increasing the value (up to 64) increases the number of
1797          *       buffer recyclements which go directly to the inactive queue.
1798          */
1799         if (vmstats.v_free_count > 2048) {
1800                 vmstats.v_cache_min = vmstats.v_free_target;
1801                 vmstats.v_cache_max = 2 * vmstats.v_cache_min;
1802         } else {
1803                 vmstats.v_cache_min = 0;
1804                 vmstats.v_cache_max = 0;
1805         }
1806         vmstats.v_inactive_target = vmstats.v_free_count / 4;
1807
1808         /* XXX does not really belong here */
1809         if (vm_page_max_wired == 0)
1810                 vm_page_max_wired = vmstats.v_free_count / 3;
1811
1812         if (vm_pageout_stats_max == 0)
1813                 vm_pageout_stats_max = vmstats.v_free_target;
1814
1815         /*
1816          * Set interval in seconds for stats scan.
1817          */
1818         if (vm_pageout_stats_interval == 0)
1819                 vm_pageout_stats_interval = 5;
1820         if (vm_pageout_full_stats_interval == 0)
1821                 vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;
1822
1823
1824         /*
1825          * Set maximum free per pass
1826          */
1827         if (vm_pageout_stats_free_max == 0)
1828                 vm_pageout_stats_free_max = 5;
1829
1830         swap_pager_swap_init();
1831         pass = 0;
1832
1833         /*
1834          * The pageout daemon is never done, so loop forever.
1835          */
1836         while (TRUE) {
1837                 int error;
1838                 int delta1;
1839                 int delta2;
1840                 int avail_shortage;
1841                 int inactive_shortage;
1842                 int vnodes_skipped = 0;
1843                 int recycle_count = 0;
1844                 int tmp;
1845
1846                 /*
1847                  * Wait for an action request.  If we timeout, check to
1848                  * see if paging is needed (in case the normal wakeup
1849                  * code raced us).
1850                  */
1851                 if (vm_pages_needed == 0) {
1852                         error = tsleep(&vm_pages_needed,
1853                                        0, "psleep",
1854                                        vm_pageout_stats_interval * hz);
1855                         if (error &&
1856                             vm_paging_needed() == 0 &&
1857                             vm_pages_needed == 0) {
1858                                 for (q = 0; q < PQ_L2_SIZE; ++q)
1859                                         vm_pageout_page_stats(q);
1860                                 continue;
1861                         }
1862                         vm_pages_needed = 1;
1863                 }
1864
1865                 mycpu->gd_cnt.v_pdwakeups++;
1866
1867                 /*
1868                  * Do whatever cleanup the pmap code can.
1869                  */
1870                 pmap_collect();
1871
1872                 /*
1873                  * Scan for pageout.  Try to avoid thrashing the system
1874                  * with activity.
1875                  *
1876                  * Calculate our target for the number of free+cache pages we
1877                  * want to get to.  This is higher then the number that causes
1878                  * want to get to.  This is higher than the number that causes
1879                  * and if we don't make it all the way but get to the minimum
1880                  * we're happy.  Goose it a bit if there are multipler
1881                  * we're happy.  Goose it a bit if there are multiple
1882                  */
1883                 avail_shortage = vm_paging_target() + vm_pageout_deficit;
1884                 vm_pageout_deficit = 0;
1885                 delta1 = 0;
1886                 if (avail_shortage > 0) {
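                        /*
                         * Ask each of the PQ_L2_SIZE inactive sub-queues to
                         * cover roughly its per-queue share of the shortage
                         * (PQAVERAGE) and total up what was actually freed.
                         */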
1887                         for (q = 0; q < PQ_L2_SIZE; ++q) {
1888                                 delta1 += vm_pageout_scan_inactive(
1889                                             pass, q,
1890                                             PQAVERAGE(avail_shortage),
1891                                             &vnodes_skipped);
1892                         }
1893                         avail_shortage -= delta1;
1894                 }
1895
1896                 /*
1897                  * Figure out how many active pages we must deactivate.  If
1898                  * we were able to reach our target with just the inactive
1899                  * scan above we limit the number of active pages we
1900                  * deactivate to reduce unnecessary work.
1901                  */
1902                 inactive_shortage = vmstats.v_inactive_target -
1903                                     vmstats.v_inactive_count;
1904
1905                 /*
1906                  * If we were unable to free sufficient inactive pages to
1907                  * satisfy the free/cache queue requirements then simply
1908                  * reaching the inactive target may not be good enough.
1909                  * Try to deactivate pages in excess of the target based
1910                  * on the shortfall.
1911                  *
1912                  * However to prevent thrashing the VM system do not
1913                  * deactivate more than an additional 1/10 the inactive
1914                  * target's worth of active pages.
1915                  */
1916                 if (avail_shortage > 0) {
1917                         tmp = avail_shortage * 2;
1918                         if (tmp > vmstats.v_inactive_target / 10)
1919                                 tmp = vmstats.v_inactive_target / 10;
1920                         inactive_shortage += tmp;
1921                 }
1922
1923                 if (avail_shortage > 0 || inactive_shortage > 0) {
1924                         delta2 = 0;
1925                         for (q = 0; q < PQ_L2_SIZE; ++q) {
1926                                 delta2 += vm_pageout_scan_active(
1927                                                 pass, q,
1928                                                 PQAVERAGE(avail_shortage),
1929                                                 PQAVERAGE(inactive_shortage),
1930                                                 &recycle_count);
1931                         }
1932                         inactive_shortage -= delta2;
1933                         avail_shortage -= delta2;
1934                 }
1935
1936                 /*
1937                  * Finally free enough cache pages to meet our free page
1938                  * requirement and take more drastic measures if we are
1939                  * still in trouble.
1940                  */
1941                 vm_pageout_scan_cache(avail_shortage, vnodes_skipped,
1942                                       recycle_count);
1943
1944                 /*
1945                  * Wait for more work.
1946                  */
1947                 if (avail_shortage > 0) {
1948                         ++pass;
1949                         if (swap_pager_full) {
1950                                 /*
1951                                  * Running out of memory, catastrophic back-off
1952                                  * to one-second intervals.
1953                                  */
1954                                 tsleep(&vm_pages_needed, 0, "pdelay", hz);
1955                         } else if (pass < 10 && vm_pages_needed > 1) {
1956                                 /*
1957                                  * Normal operation, additional processes
1958                                  * have already kicked us.  Retry immediately.
1959                                  */
1960                         } else if (pass < 10) {
1961                                 /*
1962                                  * Normal operation, fewer processes.  Delay
1963                                  * a bit but allow wakeups.
1964                                  */
1965                                 vm_pages_needed = 0;
1966                                 tsleep(&vm_pages_needed, 0, "pdelay", hz / 10);
1967                                 vm_pages_needed = 1;
1968                         } else {
1969                                 /*
1970                                  * We've taken too many passes, forced delay.
1971                                  */
1972                                 tsleep(&vm_pages_needed, 0, "pdelay", hz / 10);
1973                         }
1974                 } else {
1975                         /*
1976                          * Interlocked wakeup of waiters (non-optional)
1977                          */
1978                         pass = 0;
1979                         if (vm_pages_needed && !vm_page_count_min(0)) {
1980                                 wakeup(&vmstats.v_free_count);
1981                                 vm_pages_needed = 0;
1982                         }
1983                 }
1984         }
1985 }
1986
1987 static struct kproc_desc page_kp = {
1988         "pagedaemon",
1989         vm_pageout_thread,
1990         &pagethread
1991 };
1992 SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)
1993
1994
1995 /*
1996  * Called after allocating a page out of the cache or free queue
1997  * to possibly wake the pagedaemon up to replenish our supply.
1998  *
1999  * We try to generate some hysteresis by waking the pagedaemon up
2000  * when our free+cache pages go below the free_min+cache_min level.
2001  * The pagedaemon tries to get the count back up to at least the
2002  * minimum, and through to the target level if possible.
2003  *
2004  * If the pagedaemon is already active bump vm_pages_needed as a hint
2005  * that there are even more requests pending.
2006  *
2007  * SMP races ok?
2008  * No requirements.
2009  */
2010 void
2011 pagedaemon_wakeup(void)
2012 {
2013         if (vm_paging_needed() && curthread != pagethread) {
2014                 if (vm_pages_needed == 0) {
2015                         vm_pages_needed = 1;    /* SMP race ok */
2016                         wakeup(&vm_pages_needed);
2017                 } else if (vm_page_count_min(0)) {
2018                         ++vm_pages_needed;      /* SMP race ok */
2019                 }
2020         }
2021 }
2022
2023 #if !defined(NO_SWAPPING)
2024
2025 /*
2026  * SMP races ok?
2027  * No requirements.
2028  */
2029 static void
2030 vm_req_vmdaemon(void)
2031 {
2032         static int lastrun = 0;
2033
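        /*
         * Rate-limit wakeups of the vm daemon to roughly once per second
         * (hz ticks); the (ticks < lastrun) test handles the case where the
         * tick counter wraps.
         */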
2034         if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
2035                 wakeup(&vm_daemon_needed);
2036                 lastrun = ticks;
2037         }
2038 }
2039
2040 static int vm_daemon_callback(struct proc *p, void *data __unused);
2041
2042 /*
2043  * No requirements.
2044  */
2045 static void
2046 vm_daemon(void)
2047 {
2048         /*
2049          * XXX vm_daemon_needed specific token?
2050          */
2051         while (TRUE) {
2052                 tsleep(&vm_daemon_needed, 0, "psleep", 0);
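                /*
                 * Handle any swapout request posted by the pageout code
                 * (VM_SWAP_NORMAL or VM_SWAP_IDLE) before scanning
                 * processes.
                 */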
2053                 if (vm_pageout_req_swapout) {
2054                         swapout_procs(vm_pageout_req_swapout);
2055                         vm_pageout_req_swapout = 0;
2056                 }
2057                 /*
2058                  * Scan the processes for those exceeding their rlimits or
2059                  * that are swapped out, and deactivate their pages.
2060                  */
2061                 allproc_scan(vm_daemon_callback, NULL);
2062         }
2063 }
2064
2065 /*
2066  * Caller must hold proc_token.
2067  */
2068 static int
2069 vm_daemon_callback(struct proc *p, void *data __unused)
2070 {
2071         vm_pindex_t limit, size;
2072
2073         /*
2074          * if this is a system process or if we have already
2075          * looked at this process, skip it.
2076          */
2077         if (p->p_flags & (P_SYSTEM | P_WEXIT))
2078                 return (0);
2079
2080         /*
2081          * if the process is in a non-running type state,
2082          * don't touch it.
2083          */
2084         if (p->p_stat != SACTIVE && p->p_stat != SSTOP)
2085                 return (0);
2086
2087         /*
2088          * get a limit
2089          */
2090         limit = OFF_TO_IDX(qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
2091                                 p->p_rlimit[RLIMIT_RSS].rlim_max));
2092
2093         /*
2094          * let processes that are swapped out really be
2095          * swapped out.  Set the limit to nothing to get as
2096          * many pages out to swap as possible.
2097          */
2098         if (p->p_flags & P_SWAPPEDOUT)
2099                 limit = 0;
2100
2101         lwkt_gettoken(&p->p_vmspace->vm_map.token);
2102         size = vmspace_resident_count(p->p_vmspace);
2103         if (limit >= 0 && size >= limit) {
2104                 vm_pageout_map_deactivate_pages(&p->p_vmspace->vm_map, limit);
2105         }
2106         lwkt_reltoken(&p->p_vmspace->vm_map.token);
2107         return (0);
2108 }
2109
2110 #endif