kernel - Implement RLIMIT_RSS, Increase maximum supported swap
[dragonfly.git] / sys / vm / vm_pageout.c
1 /*
2  * Copyright (c) 1991 Regents of the University of California.
3  * All rights reserved.
4  * Copyright (c) 1994 John S. Dyson
5  * All rights reserved.
6  * Copyright (c) 1994 David Greenman
7  * All rights reserved.
8  *
9  * This code is derived from software contributed to Berkeley by
10  * The Mach Operating System project at Carnegie-Mellon University.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *      from: @(#)vm_pageout.c  7.4 (Berkeley) 5/7/91
37  *
38  *
39  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40  * All rights reserved.
41  *
42  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
43  *
44  * Permission to use, copy, modify and distribute this software and
45  * its documentation is hereby granted, provided that both the copyright
46  * notice and this permission notice appear in all copies of the
47  * software, derivative works or modified versions, and any portions
48  * thereof, and that both notices appear in supporting documentation.
49  *
50  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53  *
54  * Carnegie Mellon requests users of this software to return to
55  *
56  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
57  *  School of Computer Science
58  *  Carnegie Mellon University
59  *  Pittsburgh PA 15213-3890
60  *
61  * any improvements or extensions that they make and grant Carnegie the
62  * rights to redistribute these changes.
63  *
64  * $FreeBSD: src/sys/vm/vm_pageout.c,v 1.151.2.15 2002/12/29 18:21:04 dillon Exp $
65  */
66
67 /*
68  *      The proverbial page-out daemon.
69  */
70
71 #include "opt_vm.h"
72 #include <sys/param.h>
73 #include <sys/systm.h>
74 #include <sys/kernel.h>
75 #include <sys/proc.h>
76 #include <sys/kthread.h>
77 #include <sys/resourcevar.h>
78 #include <sys/signalvar.h>
79 #include <sys/vnode.h>
80 #include <sys/vmmeter.h>
81 #include <sys/sysctl.h>
82
83 #include <vm/vm.h>
84 #include <vm/vm_param.h>
85 #include <sys/lock.h>
86 #include <vm/vm_object.h>
87 #include <vm/vm_page.h>
88 #include <vm/vm_map.h>
89 #include <vm/vm_pageout.h>
90 #include <vm/vm_pager.h>
91 #include <vm/swap_pager.h>
92 #include <vm/vm_extern.h>
93
94 #include <sys/thread2.h>
95 #include <sys/spinlock2.h>
96 #include <vm/vm_page2.h>
97
98 /*
99  * System initialization
100  */
101
102 /* the kernel process "vm_pageout"*/
103 static int vm_pageout_page(vm_page_t m, int *max_launderp,
104                            int *vnodes_skippedp, struct vnode **vpfailedp,
105                            int pass, int vmflush_flags);
106 static int vm_pageout_clean_helper (vm_page_t, int);
107 static int vm_pageout_free_page_calc (vm_size_t count);
108 static void vm_pageout_page_free(vm_page_t m);
109 struct thread *pagethread;
110
111 #if !defined(NO_SWAPPING)
112 /* the kernel process "vm_daemon"*/
113 static void vm_daemon (void);
114 static struct   thread *vmthread;
115
116 static struct kproc_desc vm_kp = {
117         "vmdaemon",
118         vm_daemon,
119         &vmthread
120 };
121 SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp);
122 #endif
123
124 int vm_pages_needed = 0;        /* Event on which pageout daemon sleeps */
125 int vm_pageout_deficit = 0;     /* Estimated number of pages deficit */
126 int vm_pageout_pages_needed = 0;/* pageout daemon needs pages */
127 int vm_page_free_hysteresis = 16;
128
129 #if !defined(NO_SWAPPING)
130 static int vm_pageout_req_swapout;
131 static int vm_daemon_needed;
132 #endif
133 static int vm_max_launder = 4096;
134 static int vm_pageout_stats_max=0, vm_pageout_stats_interval = 0;
135 static int vm_pageout_full_stats_interval = 0;
136 static int vm_pageout_stats_free_max=0, vm_pageout_algorithm=0;
137 static int defer_swap_pageouts=0;
138 static int disable_swap_pageouts=0;
139 static u_int vm_anonmem_decline = ACT_DECLINE;
140 static u_int vm_filemem_decline = ACT_DECLINE * 2;
141
142 #if defined(NO_SWAPPING)
143 static int vm_swap_enabled=0;
144 static int vm_swap_idle_enabled=0;
145 #else
146 static int vm_swap_enabled=1;
147 static int vm_swap_idle_enabled=0;
148 #endif
149
150 SYSCTL_UINT(_vm, VM_PAGEOUT_ALGORITHM, anonmem_decline,
151         CTLFLAG_RW, &vm_anonmem_decline, 0, "active->inactive anon memory");
152
153 SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, filemem_decline,
154         CTLFLAG_RW, &vm_filemem_decline, 0, "active->inactive file cache");
155
156 SYSCTL_INT(_vm, OID_AUTO, page_free_hysteresis,
157         CTLFLAG_RW, &vm_page_free_hysteresis, 0,
158         "Free more pages than the minimum required");
159
160 SYSCTL_INT(_vm, OID_AUTO, max_launder,
161         CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");
162
163 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
164         CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");
165
166 SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
167         CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");
168
169 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
170         CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");
171
172 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
173         CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "Not implemented");
174
175 #if defined(NO_SWAPPING)
176 SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
177         CTLFLAG_RD, &vm_swap_enabled, 0, "");
178 SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
179         CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
180 #else
181 SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
182         CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
183 SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
184         CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
185 #endif
186
187 SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
188         CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");
189
190 SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
191         CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");
192
193 static int pageout_lock_miss;
194 SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
195         CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");
196
197 int vm_page_max_wired;          /* XXX max # of wired pages system-wide */
198
199 #if !defined(NO_SWAPPING)
200 static vm_pindex_t vm_pageout_object_deactivate_pages(vm_map_t map,
201                         vm_object_t object, vm_pindex_t limit,
202                         vm_pindex_t obj_beg, vm_pindex_t obj_end);
203 static void vm_req_vmdaemon (void);
204 #endif
205 static void vm_pageout_page_stats(int q);
206
207 /*
208  * Calculate approximately how many pages on each queue to try to
209  * clean.  An exact calculation creates an edge condition when the
210  * queues are unbalanced so add significant slop.  The queue scans
211  * will stop early when targets are reached and will start where they
212  * left off on the next pass.
213  *
214  * We need to be generous here because there are all sorts of loading
215  * conditions that can cause edge cases if we try to average over all queues.
216  * In particular, storage subsystems have become so fast that paging
217  * activity can become quite frantic.  Eventually we will probably need
218  * two paging threads, one for dirty pages and one for clean, to deal
219  * with the bandwidth requirements.
220  *
221  * So what we do is calculate a value that can be satisfied nominally by
222  * only having to scan half the queues.
223  */
224 static __inline int
225 PQAVERAGE(int n)
226 {
227         int avg;
228
229         if (n >= 0) {
230                 avg = ((n + (PQ_L2_SIZE - 1)) / (PQ_L2_SIZE / 2) + 1);
231         } else {
232                 avg = ((n - (PQ_L2_SIZE - 1)) / (PQ_L2_SIZE / 2) - 1);
233         }
234         return avg;
235 }
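
/*
 * Worked example (illustrative only, assuming PQ_L2_SIZE is 256): a
 * shortage of n = 1000 gives avg = (1000 + 255) / 128 + 1 = 10, so
 * scanning half of the 256 queues at 10 pages each covers 1280 >= 1000
 * pages, i.e. the target is nominally met by scanning half the queues.
 */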
236
237 /*
238  * vm_pageout_clean_helper:
239  *
240  * Clean the page and remove it from the laundry.  The page must not be
241  * busy when called.
242  *
243  * We set the busy bit to cause potential page faults on this page to
244  * block.  Note the careful timing, however: the busy bit isn't set until
245  * late, and we cannot do anything that will mess with the page.
246  */
247 static int
248 vm_pageout_clean_helper(vm_page_t m, int vmflush_flags)
249 {
250         vm_object_t object;
251         vm_page_t mc[BLIST_MAX_ALLOC];
252         int error;
253         int ib, is, page_base;
254         vm_pindex_t pindex = m->pindex;
255
256         object = m->object;
257
258         /*
259          * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
260          * with the new swapper, but we could have serious problems paging
261          * out other object types if there is insufficient memory.  
262          *
263          * Unfortunately, checking free memory here is far too late, so the
264          * check has been moved up a procedural level.
265          */
266
267         /*
268          * Don't mess with the page if it's busy, held, or special
269          *
270          * XXX do we really need to check hold_count here?  hold_count
271          * isn't supposed to mess with vm_page ops except prevent the
272          * page from being reused.
273          */
274         if (m->hold_count != 0 || (m->flags & PG_UNMANAGED)) {
275                 vm_page_wakeup(m);
276                 return 0;
277         }
278
279         /*
280          * Place page in cluster.  Align cluster for optimal swap space
281          * allocation (whether it is swap or not).  This is typically ~16-32
282          * pages, which also tends to align the cluster to multiples of the
283          * filesystem block size if backed by a filesystem.
284          */
285         page_base = pindex % BLIST_MAX_ALLOC;
286         mc[page_base] = m;
287         ib = page_base - 1;
288         is = page_base + 1;
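
        /*
         * Example (illustrative, if BLIST_MAX_ALLOC were 32): pindex 70
         * yields page_base = 6, so the cluster is aligned to cover object
         * indices 64-95, with the requested page stored at mc[6].
         */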
289
290         /*
291          * Scan object for clusterable pages.
292          *
293          * We can cluster ONLY if the page is NOT
294          * clean, wired, busy, held, or mapped into a
295          * buffer, and one of the following:
296          * 1) The page is inactive, or a seldom used
297          *    active page.
298          * -or-
299          * 2) we force the issue.
300          *
301          * During heavy mmap/modification loads the pageout
302          * daemon can really fragment the underlying file
303          * due to flushing pages out of order and not trying
304          * to align the clusters (which leaves sporadic
305          * out-of-order holes).  To solve this problem we do
306          * the reverse scan first and attempt to align our
307          * cluster, then do a forward scan if room remains.
308          */
309         vm_object_hold(object);
310
311         while (ib >= 0) {
312                 vm_page_t p;
313
314                 p = vm_page_lookup_busy_try(object, pindex - page_base + ib,
315                                             TRUE, &error);
316                 if (error || p == NULL)
317                         break;
318                 if ((p->queue - p->pc) == PQ_CACHE ||
319                     (p->flags & PG_UNMANAGED)) {
320                         vm_page_wakeup(p);
321                         break;
322                 }
323                 vm_page_test_dirty(p);
324                 if (((p->dirty & p->valid) == 0 &&
325                      (p->flags & PG_NEED_COMMIT) == 0) ||
326                     p->wire_count != 0 ||       /* may be held by buf cache */
327                     p->hold_count != 0) {       /* may be undergoing I/O */
328                         vm_page_wakeup(p);
329                         break;
330                 }
331                 if (p->queue - p->pc != PQ_INACTIVE) {
332                         if (p->queue - p->pc != PQ_ACTIVE ||
333                             (vmflush_flags & VM_PAGER_ALLOW_ACTIVE) == 0) {
334                                 vm_page_wakeup(p);
335                                 break;
336                         }
337                 }
338
339                 /*
340                  * Try to maintain page groupings in the cluster.
341                  */
342                 if (m->flags & PG_WINATCFLS)
343                         vm_page_flag_set(p, PG_WINATCFLS);
344                 else
345                         vm_page_flag_clear(p, PG_WINATCFLS);
346                 p->act_count = m->act_count;
347
348                 mc[ib] = p;
349                 --ib;
350         }
351         ++ib;   /* fixup */
352
353         while (is < BLIST_MAX_ALLOC &&
354                pindex - page_base + is < object->size) {
355                 vm_page_t p;
356
357                 p = vm_page_lookup_busy_try(object, pindex - page_base + is,
358                                             TRUE, &error);
359                 if (error || p == NULL)
360                         break;
361                 if (((p->queue - p->pc) == PQ_CACHE) ||
362                     (p->flags & PG_UNMANAGED)) {
363                         vm_page_wakeup(p);
364                         break;
365                 }
366                 vm_page_test_dirty(p);
367                 if (((p->dirty & p->valid) == 0 &&
368                      (p->flags & PG_NEED_COMMIT) == 0) ||
369                     p->wire_count != 0 ||       /* may be held by buf cache */
370                     p->hold_count != 0) {       /* may be undergoing I/O */
371                         vm_page_wakeup(p);
372                         break;
373                 }
374                 if (p->queue - p->pc != PQ_INACTIVE) {
375                         if (p->queue - p->pc != PQ_ACTIVE ||
376                             (vmflush_flags & VM_PAGER_ALLOW_ACTIVE) == 0) {
377                                 vm_page_wakeup(p);
378                                 break;
379                         }
380                 }
381
382                 /*
383                  * Try to maintain page groupings in the cluster.
384                  */
385                 if (m->flags & PG_WINATCFLS)
386                         vm_page_flag_set(p, PG_WINATCFLS);
387                 else
388                         vm_page_flag_clear(p, PG_WINATCFLS);
389                 p->act_count = m->act_count;
390
391                 mc[is] = p;
392                 ++is;
393         }
394
395         vm_object_drop(object);
396
397         /*
398          * we allow reads during pageouts...
399          */
400         return vm_pageout_flush(&mc[ib], is - ib, vmflush_flags);
401 }
402
403 /*
404  * vm_pageout_flush() - launder the given pages
405  *
406  *      The given pages are laundered.  Note that we set up for the start
407  *      of I/O (i.e. busy the page), mark it read-only, and bump the object
408  *      reference count all in here rather than in the parent.  If we want
409  *      the parent to do more sophisticated things we may have to change
410  *      the ordering.
411  *
412  *      The pages in the array must be busied by the caller and will be
413  *      unbusied by this function.
414  */
415 int
416 vm_pageout_flush(vm_page_t *mc, int count, int vmflush_flags)
417 {
418         vm_object_t object;
419         int pageout_status[count];
420         int numpagedout = 0;
421         int i;
422
423         /*
424          * Initiate I/O.  Bump the vm_page_t->busy counter.
425          */
426         for (i = 0; i < count; i++) {
427                 KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
428                         ("vm_pageout_flush page %p index %d/%d: partially "
429                          "invalid page", mc[i], i, count));
430                 vm_page_io_start(mc[i]);
431         }
432
433         /*
434          * We must make the pages read-only.  This will also force the
435          * modified bit in the related pmaps to be cleared.  The pager
436          * cannot clear the bit for us since the I/O completion code
437          * typically runs from an interrupt.  The act of making the page
438          * read-only handles the case for us.
439          *
440          * Then we can unbusy the pages; we still hold a reference by virtue
441          * of our soft-busy.
442          */
443         for (i = 0; i < count; i++) {
444                 if (vmflush_flags & VM_PAGER_TRY_TO_CACHE)
445                         vm_page_protect(mc[i], VM_PROT_NONE);
446                 else
447                         vm_page_protect(mc[i], VM_PROT_READ);
448                 vm_page_wakeup(mc[i]);
449         }
450
451         object = mc[0]->object;
452         vm_object_pip_add(object, count);
453
454         vm_pager_put_pages(object, mc, count,
455             (vmflush_flags |
456              ((object == &kernel_object) ? VM_PAGER_PUT_SYNC : 0)),
457             pageout_status);
458
459         for (i = 0; i < count; i++) {
460                 vm_page_t mt = mc[i];
461
462                 switch (pageout_status[i]) {
463                 case VM_PAGER_OK:
464                         numpagedout++;
465                         break;
466                 case VM_PAGER_PEND:
467                         numpagedout++;
468                         break;
469                 case VM_PAGER_BAD:
470                         /*
471                          * Page outside of range of object. Right now we
472                          * essentially lose the changes by pretending it
473                          * worked.
474                          */
475                         vm_page_busy_wait(mt, FALSE, "pgbad");
476                         pmap_clear_modify(mt);
477                         vm_page_undirty(mt);
478                         vm_page_wakeup(mt);
479                         break;
480                 case VM_PAGER_ERROR:
481                 case VM_PAGER_FAIL:
482                         /*
483                          * A page typically cannot be paged out when we
484                          * have run out of swap.  We leave the page
485                          * marked inactive and will try to page it out
486                          * again later.
487                          *
488                          * Starvation of the active page list is used to
489                          * determine when the system is massively memory
490                          * starved.
491                          */
492                         break;
493                 case VM_PAGER_AGAIN:
494                         break;
495                 }
496
497                 /*
498                  * If not PENDing this was a synchronous operation and we
499                  * clean up after the I/O.  If it is PENDing the mess is
500                  * cleaned up asynchronously.
501                  *
502                  * Also nominally act on the caller's wishes if the caller
503                  * wants to try to really clean (cache or free) the page.
504                  *
505                  * Also nominally deactivate the page if the system is
506                  * memory-stressed.
507                  */
508                 if (pageout_status[i] != VM_PAGER_PEND) {
509                         vm_page_busy_wait(mt, FALSE, "pgouw");
510                         vm_page_io_finish(mt);
511                         if (vmflush_flags & VM_PAGER_TRY_TO_CACHE) {
512                                 vm_page_try_to_cache(mt);
513                         } else if (vm_page_count_severe()) {
514                                 vm_page_deactivate(mt);
515                                 vm_page_wakeup(mt);
516                         } else {
517                                 vm_page_wakeup(mt);
518                         }
519                         vm_object_pip_wakeup(object);
520                 }
521         }
522         return numpagedout;
523 }
524
525 #if !defined(NO_SWAPPING)
526
527 /*
528  * Deactivate pages until the map RSS falls below the specified limit.
529  *
530  * This code is part of the process rlimit and vm_daemon handler and not
531  * part of the normal demand-paging code.  We only check the top-level
532  * object.
533  *
534  * The map must be locked.
535  * The caller must hold the vm_object.
536  */
537 static int vm_pageout_object_deactivate_pages_callback(vm_page_t, void *);
538 static int vm_pageout_object_deactivate_pages_cmp(vm_page_t, void *);
539
540 static vm_pindex_t
541 vm_pageout_object_deactivate_pages(vm_map_t map, vm_object_t object,
542                                    vm_pindex_t limit,
543                                    vm_pindex_t obj_beg,
544                                    vm_pindex_t obj_end)
545 {
546         struct rb_vm_page_scan_info info;
547         int remove_mode;
548
549         ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
550
551         info.count = 0;
552         info.backing_offset_index = obj_beg;
553         info.backing_object = object;
554
555         for (;;) {
556                 vm_pindex_t advance;
557
558                 if (pmap_resident_tlnw_count(vm_map_pmap(map)) <= limit)
559                         break;
560                 if (object->type == OBJT_DEVICE ||
561                     object->type == OBJT_MGTDEVICE ||
562                     object->type == OBJT_PHYS) {
563                         break;
564                 }
565 #if 0
566                 if (object->paging_in_progress)
567                         break;
568 #endif
569
570                 remove_mode = 0;
571                 if (object->shadow_count > 1)
572                         remove_mode = 1;
573
574                 /*
575                  * scan the object's entire memory queue.  We hold the
576                  * object's token so the scan should not race anything.
577                  *
578                  * The callback will adjust backing_offset_index past the
579                  * last index scanned.  This value only matters if we
580                  * terminate early.
581                  */
582                 info.limit = remove_mode;
583                 info.map = map;
584                 info.desired = limit;
585                 info.start_pindex = obj_beg;
586                 info.end_pindex = obj_end;
587                 info.object = object;
588
589                 vm_page_rb_tree_RB_SCAN(&object->rb_memq,
590                                 vm_pageout_object_deactivate_pages_cmp,
591                                 vm_pageout_object_deactivate_pages_callback,
592                                 &info);
593
594                 /*
595                  * Backing object recursion (we will loop up).
596                  */
597                 while ((object = info.object->backing_object) != NULL) {
598                         vm_object_hold(object);
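                        /*
                         * vm_object_hold() may have blocked; re-check that
                         * the backing object did not change underneath us.
                         */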
599                         if (object != info.object->backing_object) {
600                                 vm_object_drop(object);
601                                 continue;
602                         }
603                         break;
604                 }
605                 if (object == NULL) {
606                         if (info.object != info.backing_object)
607                                 vm_object_drop(info.object);
608                         break;
609                 }
610                 advance = OFF_TO_IDX(info.object->backing_object_offset);
611                 info.start_pindex += advance;
612                 info.end_pindex += advance;
613                 info.backing_offset_index += advance;
614                 if (info.object != info.backing_object) {
615                         vm_object_lock_swap();
616                         vm_object_drop(info.object);
617                 }
618                 info.object = object;
619                 kprintf("X");
620         }
621
622         /*
623          * Return how far we want the caller to advance.  The caller will
624          * ignore this value and use obj_end if the RSS limit is still not
625          * satisfied.
626          */
627         return (info.backing_offset_index - info.start_pindex);
628 }
629
630 /*
631  * Only page indices in the half-open range [start_pindex, end_pindex)
632  */
633 static
634 int
635 vm_pageout_object_deactivate_pages_cmp(vm_page_t p, void *data)
636 {
637         struct rb_vm_page_scan_info *info = data;
638
639         if (p->pindex < info->start_pindex)
640                 return -1;
641         if (p->pindex >= info->end_pindex)
642                 return +1;
643         return 0;
644 }
645
646 /*
647  * The caller must hold the vm_object.
648  *
649  * info->count is bumped for every page removed from the process pmap.
650  *
651  * info->backing_offset_index is updated past the last scanned page.
652  * This value will be ignored and the scan forced to the mapent boundary
653  * by the caller if the resident count remains too high.
654  */
655 static int
656 vm_pageout_object_deactivate_pages_callback(vm_page_t p, void *data)
657 {
658         struct rb_vm_page_scan_info *info = data;
659         int actcount;
660         int cleanit = 0;
661
662         /*
663          * Basic tests - There should never be a marker, and we can stop
664          *               once the RSS is below the required level.
665          */
666         KKASSERT((p->flags & PG_MARKER) == 0);
667         if (pmap_resident_tlnw_count(vm_map_pmap(info->map)) <=
668             info->desired) {
669                 return(-1);
670         }
671
672         mycpu->gd_cnt.v_pdpages++;
673         info->backing_offset_index = p->pindex + 1;
674
675         if (vm_page_busy_try(p, TRUE))
676                 return(0);
677
678         if (p->object != info->object) {
679                 vm_page_wakeup(p);
680                 return(0);
681         }
682         if (p->wire_count || p->hold_count || (p->flags & PG_UNMANAGED)) {
683                 vm_page_wakeup(p);
684                 goto done;
685         }
686         if (!pmap_page_exists_quick(vm_map_pmap(info->map), p)) {
687                 vm_page_wakeup(p);
688                 goto done;
689         }
690
691         actcount = pmap_ts_referenced(p);
692         if (actcount) {
693                 vm_page_flag_set(p, PG_REFERENCED);
694         } else if (p->flags & PG_REFERENCED) {
695                 actcount = 1;
696         }
697
698         vm_page_and_queue_spin_lock(p);
699         if (p->queue - p->pc != PQ_ACTIVE && (p->flags & PG_REFERENCED)) {
700                 vm_page_and_queue_spin_unlock(p);
701                 vm_page_activate(p);
702                 p->act_count += actcount;
703                 vm_page_flag_clear(p, PG_REFERENCED);
704         } else if (p->queue - p->pc == PQ_ACTIVE) {
705                 if ((p->flags & PG_REFERENCED) == 0) {
706                         /* use ACT_ADVANCE for a faster decline */
707                         p->act_count -= min(p->act_count, ACT_ADVANCE);
708                         if (!info->limit &&
709                             (vm_pageout_algorithm || (p->act_count == 0))) {
710                                 vm_page_and_queue_spin_unlock(p);
711                                 vm_page_deactivate(p);
712                                 cleanit = 1;
713                         } else {
714                                 TAILQ_REMOVE(&vm_page_queues[p->queue].pl,
715                                              p, pageq);
716                                 TAILQ_INSERT_TAIL(&vm_page_queues[p->queue].pl,
717                                                   p, pageq);
718                                 vm_page_and_queue_spin_unlock(p);
719                         }
720                 } else {
721                         vm_page_and_queue_spin_unlock(p);
722                         vm_page_activate(p);
723                         vm_page_flag_clear(p, PG_REFERENCED);
724
725                         vm_page_and_queue_spin_lock(p);
726                         if (p->queue - p->pc == PQ_ACTIVE) {
727                                 if (p->act_count < (ACT_MAX - ACT_ADVANCE))
728                                         p->act_count += ACT_ADVANCE;
729                                 TAILQ_REMOVE(&vm_page_queues[p->queue].pl,
730                                              p, pageq);
731                                 TAILQ_INSERT_TAIL(&vm_page_queues[p->queue].pl,
732                                                   p, pageq);
733                         }
734                         vm_page_and_queue_spin_unlock(p);
735                 }
736         } else if (p->queue - p->pc == PQ_INACTIVE) {
737 #if 0
738                 TAILQ_REMOVE(&vm_page_queues[p->queue].pl,
739                              p, pageq);
740                 TAILQ_INSERT_HEAD(&vm_page_queues[p->queue].pl,
741                                   p, pageq);
742 #endif
743                 /* use ACT_ADVANCE for a faster decline */
744                 p->act_count -= min(p->act_count, ACT_ADVANCE);
745                 vm_page_and_queue_spin_unlock(p);
746                 if (p->act_count == 0) {
747                         cleanit = 1;
748                 }
749         } else {
750                 vm_page_and_queue_spin_unlock(p);
751         }
752
753         /*
754          * Ok, try to fully clean the page and any nearby pages such that at
755          * least the requested page is freed or moved to the cache queue.
756          *
757          * We usually do this synchronously to allow us to get the page into
758          * the CACHE queue quickly, which will prevent memory exhaustion if
759          * a process with a memoryuse limit is running away.  However, the
760          * sysadmin may desire to set vm.swap_user_async which relaxes this
761          * and improves write performance.
762          */
763         if (cleanit) {
764                 int max_launder = 0x7FFF;
765                 int vnodes_skipped = 0;
766                 int vmflush_flags;
767                 struct vnode *vpfailed = NULL;
768
769                 vmflush_flags = VM_PAGER_TRY_TO_CACHE | VM_PAGER_ALLOW_ACTIVE;
770                 if (swap_user_async == 0)
771                         vmflush_flags |= VM_PAGER_PUT_SYNC;
772
773                 vm_page_protect(p, VM_PROT_NONE);
774                 vm_page_flag_set(p, PG_WINATCFLS);
775                 info->count += vm_pageout_page(p, &max_launder, &vnodes_skipped,
776                                                &vpfailed, 1, vmflush_flags);
777         } else {
778                 vm_page_wakeup(p);
779         }
780
781 done:
782         lwkt_user_yield();
783         return(0);
784 }
785
786 /*
787  * Deactivate some number of pages in a map due to RLIMIT_RSS limits being
788  * set; this is relatively difficult to do.
789  */
790 void
791 vm_pageout_map_deactivate_pages(vm_map_t map, vm_pindex_t limit)
792 {
793         vm_map_entry_t tmpe;
794         vm_object_t obj;
795         vm_ooffset_t pgout_offset;
796         vm_ooffset_t tmpe_end;
797         vm_pindex_t obj_beg;
798         vm_pindex_t obj_end;
799         vm_pindex_t count;
800         int retries = 3;
801
802         lockmgr(&map->lock, LK_EXCLUSIVE);
803
804         /*
805          * Scan the map incrementally.
806          */
807         pgout_offset = map->pgout_offset;
808 again:
809         tmpe = map->header.next;
810         obj_beg = 0;
811         obj_end = 0;
812         tmpe_end = 0;
813         obj = NULL;
814
815         while (tmpe != &map->header) {
816                 if (tmpe->end <= pgout_offset) {
817                         tmpe = tmpe->next;
818                         continue;
819                 }
820                 if (tmpe->maptype == VM_MAPTYPE_NORMAL ||
821                     tmpe->maptype == VM_MAPTYPE_VPAGETABLE) {
822                         obj = tmpe->object.vm_object;
823                         if (obj && obj->shadow_count <= 1) {
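                                /*
                                 * Convert the entry's address range,
                                 * clipped to the saved scan offset,
                                 * into page indices relative to the
                                 * backing object (tmpe->offset is the
                                 * entry's starting offset into that
                                 * object).
                                 */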
824                                 if (pgout_offset < tmpe->start) {
825                                         obj_beg = tmpe->offset >> PAGE_SHIFT;
826                                         obj_end = ((tmpe->end - tmpe->start) +
827                                                    tmpe->offset) >> PAGE_SHIFT;
828                                 } else {
829                                         obj_beg = (pgout_offset - tmpe->start +
830                                                    tmpe->offset) >> PAGE_SHIFT;
831                                         obj_end = (tmpe->end - tmpe->start +
832                                                    tmpe->offset) >> PAGE_SHIFT;
833                                 }
834                                 tmpe_end = tmpe->end;
835                                 break;
836                         }
837                         obj = NULL;
838                 }
839                 tmpe = tmpe->next;
840         }
841
842         /*
843          * Attempt to continue where we left off until the RLIMIT is
844          * satisfied or we run out of retries.  Note that the map remains
845          * locked, so the program is not going to be taking any faults
846          * while we are doing this.
847          */
848         if (obj)  {
849                 vm_object_hold(obj);
850                 count = vm_pageout_object_deactivate_pages(map, obj, limit,
851                                                    obj_beg, obj_end);
852                 vm_object_drop(obj);
853                 if (pmap_resident_tlnw_count(vm_map_pmap(map)) > limit) {
854                         pgout_offset = tmpe_end;
855                         goto again;
856                 }
857
858                 /*
859                  * Early termination.
860                  */
861                 pgout_offset += count << PAGE_SHIFT;
862         } else {
863                 pgout_offset = 0;
864                 if (pmap_resident_tlnw_count(vm_map_pmap(map)) > limit) {
865                         if (retries) {
866                                 --retries;
867                                 goto again;
868                         }
869                 }
870         }
871
872         map->pgout_offset = pgout_offset;
873
874         vm_map_unlock(map);
875 }
876 #endif
877
878 /*
879  * Called when the pageout scan wants to free a page.  We no longer
880  * try to cycle the vm_object here with a reference & dealloc, which can
881  * cause a non-trivial object collapse in a critical path.
882  *
883  * It is unclear why we cycled the ref_count in the past, perhaps to try
884  * to optimize shadow chain collapses but I don't quite see why it would
885  * be necessary.  An OBJ_DEAD object should terminate any and all vm_pages
886  * synchronously and not have to be kick-started.
887  */
888 static void
889 vm_pageout_page_free(vm_page_t m) 
890 {
891         vm_page_protect(m, VM_PROT_NONE);
892         vm_page_free(m);
893 }
894
895 /*
896  * vm_pageout_scan does the dirty work for the pageout daemon.
897  */
898 struct vm_pageout_scan_info {
899         struct proc *bigproc;
900         vm_offset_t bigsize;
901 };
902
903 static int vm_pageout_scan_callback(struct proc *p, void *data);
904
905 static int
906 vm_pageout_scan_inactive(int pass, int q, int avail_shortage,
907                          int *vnodes_skipped)
908 {
909         vm_page_t m;
910         struct vm_page marker;
911         struct vnode *vpfailed;         /* warning, allowed to be stale */
912         int maxscan;
913         int delta = 0;
914         int max_launder;
915
916         /*
917          * Start scanning the inactive queue for pages we can move to the
918          * cache or free.  The scan will stop when the target is reached or
919          * we have scanned the entire inactive queue.  Note that m->act_count
920          * is not used to form decisions for the inactive queue, only for the
921          * active queue.
922          *
923          * max_launder limits the number of dirty pages we flush per scan.
924          * For most systems a smaller value (16 or 32) is more robust under
925          * extreme memory and disk pressure because any unnecessary writes
926          * to disk can result in extreme performance degradation.  However,
927          * systems with excessive dirty pages (especially when MAP_NOSYNC is
928          * used) will die horribly with limited laundering.  If the pageout
929          * daemon cannot clean enough pages in the first pass, we let it go
930          * all out in succeeding passes.
931          */
932         if ((max_launder = vm_max_launder) <= 1)
933                 max_launder = 1;
934         if (pass)
935                 max_launder = 10000;
936
937         /*
938          * Initialize our marker, a dummy page used to hold our place in the queue
939          */
940         bzero(&marker, sizeof(marker));
941         marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
942         marker.queue = PQ_INACTIVE + q;
943         marker.pc = q;
944         marker.wire_count = 1;
945
946         /*
947          * Inactive queue scan.
948          *
949          * NOTE: The vm_page must be spinlocked before the queue to avoid
950          *       deadlocks, so it is easiest to simply iterate the loop
951          *       with the queue unlocked at the top.
952          */
953         vpfailed = NULL;
954
955         vm_page_queues_spin_lock(PQ_INACTIVE + q);
956         TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE + q].pl, &marker, pageq);
957         maxscan = vm_page_queues[PQ_INACTIVE + q].lcnt;
958
959         /*
960          * Queue locked at top of loop to avoid stack marker issues.
961          */
962         while ((m = TAILQ_NEXT(&marker, pageq)) != NULL &&
963                maxscan-- > 0 && avail_shortage - delta > 0)
964         {
965                 int count;
966
967                 KKASSERT(m->queue == PQ_INACTIVE + q);
968                 TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE + q].pl,
969                              &marker, pageq);
970                 TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE + q].pl, m,
971                                    &marker, pageq);
972                 mycpu->gd_cnt.v_pdpages++;
973
974                 /*
975                  * Skip marker pages (atomic against other markers to avoid
976                  * infinite hop-over scans).
977                  */
978                 if (m->flags & PG_MARKER)
979                         continue;
980
981                 /*
982                  * Try to busy the page.  Don't mess with pages which are
983                  * already busy or reorder them in the queue.
984                  */
985                 if (vm_page_busy_try(m, TRUE))
986                         continue;
987
988                 /*
989                  * Remaining operations run with the page busy and neither
990                  * the page or the queue will be spin-locked.
991                  */
992                 vm_page_queues_spin_unlock(PQ_INACTIVE + q);
993                 KKASSERT(m->queue == PQ_INACTIVE + q);
994
995                 count = vm_pageout_page(m, &max_launder, vnodes_skipped,
996                                         &vpfailed, pass, 0);
997                 delta += count;
998
999                 /*
1000                  * Systems with a ton of memory can wind up with huge
1001                  * deactivation counts.  Because the inactive scan is
1002                  * doing a lot of flushing, the combination can result
1003                  * in excessive paging even in situations where other
1004                  * unrelated threads free up sufficient VM.
1005                  *
1006                  * To deal with this we abort the nominal active->inactive
1007                  * scan before we hit the inactive target when free+cache
1008                  * levels have reached a reasonable target.
1009                  *
1010                  * When deciding to stop early we need to add some slop to
1011                  * the test and we need to return full completion to the caller
1012                  * to prevent the caller from thinking there is something
1013                  * wrong and issuing a low-memory+swap warning or pkill.
1014                  *
1015                  * A deficit forces paging regardless of the state of the
1016                  * VM page queues (used for RSS enforcement).
1017                  */
1018                 lwkt_yield();
1019                 vm_page_queues_spin_lock(PQ_INACTIVE + q);
1020                 if (vm_paging_target() < -vm_max_launder) {
1021                         /*
1022                          * Stopping early, return full completion to caller.
1023                          */
1024                         if (delta < avail_shortage)
1025                                 delta = avail_shortage;
1026                         break;
1027                 }
1028         }
1029
1030         /* page queue still spin-locked */
1031         TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE + q].pl, &marker, pageq);
1032         vm_page_queues_spin_unlock(PQ_INACTIVE + q);
1033
1034         return (delta);
1035 }
1036
1037 /*
1038  * Pageout the specified page, return the total number of pages paged out
1039  * (this routine may cluster).
1040  *
1041  * The page must be busied and soft-busied by the caller and will be disposed
1042  * of by this function.
1043  */
1044 static int
1045 vm_pageout_page(vm_page_t m, int *max_launderp, int *vnodes_skippedp,
1046                 struct vnode **vpfailedp, int pass, int vmflush_flags)
1047 {
1048         vm_object_t object;
1049         int actcount;
1050         int count = 0;
1051
1052         /*
1053          * It is possible for a page to be busied ad-hoc (e.g. the
1054          * pmap_collect() code) and wired and race against the
1055          * allocation of a new page.  vm_page_alloc() may be forced
1056          * to deactivate the wired page in which case it winds up
1057          * on the inactive queue and must be handled here.  We
1058          * correct the problem simply by unqueuing the page.
1059          */
1060         if (m->wire_count) {
1061                 vm_page_unqueue_nowakeup(m);
1062                 vm_page_wakeup(m);
1063                 kprintf("WARNING: pagedaemon: wired page on "
1064                         "inactive queue %p\n", m);
1065                 return 0;
1066         }
1067
1068         /*
1069          * A held page may be undergoing I/O, so skip it.
1070          */
1071         if (m->hold_count) {
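                /*
                 * Requeue the held page to the tail of the inactive queue
                 * so the scan does not keep tripping over it.
                 */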
1072                 vm_page_and_queue_spin_lock(m);
1073                 if (m->queue - m->pc == PQ_INACTIVE) {
1074                         TAILQ_REMOVE(
1075                                 &vm_page_queues[m->queue].pl, m, pageq);
1076                         TAILQ_INSERT_TAIL(
1077                                 &vm_page_queues[m->queue].pl, m, pageq);
1078                         ++vm_swapcache_inactive_heuristic;
1079                 }
1080                 vm_page_and_queue_spin_unlock(m);
1081                 vm_page_wakeup(m);
1082                 return 0;
1083         }
1084
1085         if (m->object == NULL || m->object->ref_count == 0) {
1086                 /*
1087                  * If the object is not being used, we ignore previous
1088                  * references.
1089                  */
1090                 vm_page_flag_clear(m, PG_REFERENCED);
1091                 pmap_clear_reference(m);
1092                 /* fall through to end */
1093         } else if (((m->flags & PG_REFERENCED) == 0) &&
1094                     (actcount = pmap_ts_referenced(m))) {
1095                 /*
1096                  * Otherwise, if the page has been referenced while
1097                  * in the inactive queue, we bump the "activation
1098                  * count" upwards, making it less likely that the
1099                  * page will be added back to the inactive queue
1100                  * prematurely again.  Here we check the page tables
1101                  * (or emulated bits, if any), given the upper level
1102                  * VM system not knowing anything about existing
1103                  * references.
1104                  */
1105                 vm_page_activate(m);
1106                 m->act_count += (actcount + ACT_ADVANCE);
1107                 vm_page_wakeup(m);
1108                 return 0;
1109         }
1110
1111         /*
1112          * (m) is still busied.
1113          *
1114          * If the upper level VM system knows about any page
1115          * references, we activate the page.  We also set the
1116          * "activation count" higher than normal so that we are less
1117          * likely to place pages back onto the inactive queue again.
1118          */
1119         if ((m->flags & PG_REFERENCED) != 0) {
1120                 vm_page_flag_clear(m, PG_REFERENCED);
1121                 actcount = pmap_ts_referenced(m);
1122                 vm_page_activate(m);
1123                 m->act_count += (actcount + ACT_ADVANCE + 1);
1124                 vm_page_wakeup(m);
1125                 return 0;
1126         }
1127
1128         /*
1129          * If the upper level VM system doesn't know anything about
1130          * the page being dirty, we have to check for it again.  As
1131          * far as the VM code knows, any partially dirty pages are
1132          * fully dirty.
1133          *
1134          * Pages marked PG_WRITEABLE may be mapped into the user
1135          * address space of a process running on another cpu.  A
1136          * user process (without holding the MP lock) running on
1137          * another cpu may be able to touch the page while we are
1138          * trying to remove it.  vm_page_cache() will handle this
1139          * case for us.
1140          */
1141         if (m->dirty == 0) {
1142                 vm_page_test_dirty(m);
1143         } else {
1144                 vm_page_dirty(m);
1145         }
1146
1147         if (m->valid == 0 && (m->flags & PG_NEED_COMMIT) == 0) {
1148                 /*
1149                  * Invalid pages can be easily freed
1150                  */
1151                 vm_pageout_page_free(m);
1152                 mycpu->gd_cnt.v_dfree++;
1153                 ++count;
1154         } else if (m->dirty == 0 && (m->flags & PG_NEED_COMMIT) == 0) {
1155                 /*
1156                  * Clean pages can be placed onto the cache queue.
1157                  * This effectively frees them.
1158                  */
1159                 vm_page_cache(m);
1160                 ++count;
1161         } else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) {
1162                 /*
1163                  * Dirty pages need to be paged out, but flushing
1164                  * a page is extremely expensive versus freeing
1165                  * a clean page.  Rather than artificially limiting
1166                  * the number of pages we can flush, we instead give
1167                  * dirty pages extra priority on the inactive queue
1168                  * by forcing them to be cycled through the queue
1169                  * twice before being flushed, after which the
1170                  * (now clean) page will cycle through once more
1171                  * before being freed.  This significantly extends
1172                  * the thrash point for a heavily loaded machine.
1173                  */
1174                 vm_page_flag_set(m, PG_WINATCFLS);
1175                 vm_page_and_queue_spin_lock(m);
1176                 if (m->queue - m->pc == PQ_INACTIVE) {
1177                         TAILQ_REMOVE(
1178                                 &vm_page_queues[m->queue].pl, m, pageq);
1179                         TAILQ_INSERT_TAIL(
1180                                 &vm_page_queues[m->queue].pl, m, pageq);
1181                         ++vm_swapcache_inactive_heuristic;
1182                 }
1183                 vm_page_and_queue_spin_unlock(m);
1184                 vm_page_wakeup(m);
1185         } else if (*max_launderp > 0) {
1186                 /*
1187                  * We always want to try to flush some dirty pages if
1188                  * we encounter them, to keep the system stable.
1189                  * Normally this number is small, but under extreme
1190                  * pressure where there are insufficient clean pages
1191                  * on the inactive queue, we may have to go all out.
1192                  */
1193                 int swap_pageouts_ok;
1194                 struct vnode *vp = NULL;
1195
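                /*
                 * Decide whether this page may be laundered.  Pages backed
                 * by objects other than swap/default (e.g. vnode objects)
                 * can always be paged out.  Swap-backed pages are laundered
                 * only when swap pageouts are neither deferred nor disabled,
                 * or when they are merely deferred and the free page count
                 * has fallen to the minimum.
                 */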
1196                 swap_pageouts_ok = 0;
1197                 object = m->object;
1198                 if (object &&
1199                     (object->type != OBJT_SWAP) &&
1200                     (object->type != OBJT_DEFAULT)) {
1201                         swap_pageouts_ok = 1;
1202                 } else {
1203                         swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
1204                         swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
1205                         vm_page_count_min(0));
1206                 }
1207
1208                 /*
1209                  * We don't bother paging objects that are "dead".
1210                  * Those objects are in a "rundown" state.
1211                  */
1212                 if (!swap_pageouts_ok ||
1213                     (object == NULL) ||
1214                     (object->flags & OBJ_DEAD)) {
1215                         vm_page_and_queue_spin_lock(m);
1216                         if (m->queue - m->pc == PQ_INACTIVE) {
1217                                 TAILQ_REMOVE(
1218                                     &vm_page_queues[m->queue].pl,
1219                                     m, pageq);
1220                                 TAILQ_INSERT_TAIL(
1221                                     &vm_page_queues[m->queue].pl,
1222                                     m, pageq);
1223                                 ++vm_swapcache_inactive_heuristic;
1224                         }
1225                         vm_page_and_queue_spin_unlock(m);
1226                         vm_page_wakeup(m);
1227                         return 0;
1228                 }
1229
1230                 /*
1231                  * (m) is still busied.
1232                  *
1233                  * The object is already known NOT to be dead.   It
1234                  * is possible for the vget() to block the whole
1235                  * pageout daemon, but the new low-memory handling
1236                  * code should prevent it.
1237                  *
1238                  * The previous code skipped locked vnodes and, worse,
1239                  * reordered pages in the queue.  This results in
1240                  * completely non-deterministic operation because,
1241                  * quite often, a vm_fault has initiated an I/O and
1242                  * is holding a locked vnode at just the point where
1243                  * the pageout daemon is woken up.
1244                  *
1245                  * We can't wait forever for the vnode lock, we might
1246                  * deadlock due to a vn_read() getting stuck in
1247                  * vm_wait while holding this vnode.  We skip the
1248                  * vnode if we can't get it in a reasonable amount
1249                  * of time.
1250                  *
1251                  * vpfailed is used to (try to) avoid the case where
1252                  * a large number of pages are associated with a
1253                  * locked vnode, which could cause the pageout daemon
1254                  * to stall for an excessive amount of time.
1255                  */
1256                 if (object->type == OBJT_VNODE) {
1257                         int flags;
1258
1259                         vp = object->handle;
1260                         flags = LK_EXCLUSIVE;
1261                         if (vp == *vpfailedp)
1262                                 flags |= LK_NOWAIT;
1263                         else
1264                                 flags |= LK_TIMELOCK;
1265                         vm_page_hold(m);
1266                         vm_page_wakeup(m);
1267
1268                         /*
1269                          * We have unbusied (m) temporarily so we can
1270                          * acquire the vp lock without deadlocking.
1271                          * (m) is held to prevent destruction.
1272                          */
1273                         if (vget(vp, flags) != 0) {
1274                                 *vpfailedp = vp;
1275                                 ++pageout_lock_miss;
1276                                 if (object->flags & OBJ_MIGHTBEDIRTY)
1277                                             ++*vnodes_skippedp;
1278                                 vm_page_unhold(m);
1279                                 return 0;
1280                         }
1281
1282                         /*
1283                          * The page might have been moved to another
1284                          * queue during potential blocking in vget()
1285                          * above.  The page might have been freed and
1286                          * reused for another vnode.  The object might
1287                          * have been reused for another vnode.
1288                          */
1289                         if (m->queue - m->pc != PQ_INACTIVE ||
1290                             m->object != object ||
1291                             object->handle != vp) {
1292                                 if (object->flags & OBJ_MIGHTBEDIRTY)
1293                                         ++*vnodes_skippedp;
1294                                 vput(vp);
1295                                 vm_page_unhold(m);
1296                                 return 0;
1297                         }
1298
1299                         /*
1300                          * The page may have been busied while we were
1301                          * blocked in vget() above.  We don't move the
1302                          * page back onto the end of the queue; statistics
1303                          * are more correct that way.
1304                          */
1305                         if (vm_page_busy_try(m, TRUE)) {
1306                                 vput(vp);
1307                                 vm_page_unhold(m);
1308                                 return 0;
1309                         }
1310                         vm_page_unhold(m);
1311
1312                         /*
1313                          * (m) is busied again
1314                          *
1315                          * We own the busy bit and remove our hold
1316                          * bit.  If the page is still held it
1317                          * might be undergoing I/O, so skip it.
1318                          */
1319                         if (m->hold_count) {
1320                                 vm_page_and_queue_spin_lock(m);
1321                                 if (m->queue - m->pc == PQ_INACTIVE) {
1322                                         TAILQ_REMOVE(&vm_page_queues[m->queue].pl, m, pageq);
1323                                         TAILQ_INSERT_TAIL(&vm_page_queues[m->queue].pl, m, pageq);
1324                                         ++vm_swapcache_inactive_heuristic;
1325                                 }
1326                                 vm_page_and_queue_spin_unlock(m);
1327                                 if (object->flags & OBJ_MIGHTBEDIRTY)
1328                                         ++*vnodes_skippedp;
1329                                 vm_page_wakeup(m);
1330                                 vput(vp);
1331                                 return 0;
1332                         }
1333                         /* (m) is left busied as we fall through */
1334                 }
1335
1336                 /*
1337                  * The page is busied and not held at this point.
1338                  *
1339                  * If a page is dirty, then it is either being washed
1340                  * (but not yet cleaned) or it is still in the
1341                  * laundry.  If it is still in the laundry, then we
1342                  * start the cleaning operation.
1343                  *
1344                  * Decrement the launder budget (*max_launderp) by the
1345                  * number of pages cleaned to account for the (future)
1346                  * cleaned pages.  Otherwise we could wind up laundering
1347                  * or cleaning too many pages.
1348                  *
1349                  * NOTE: Cleaning the page here does not cause
1350                  *       force_deficit to be adjusted, because the
1351                  *       page is not being freed or moved to the
1352                  *       cache.
1353                  */
1354                 count = vm_pageout_clean_helper(m, vmflush_flags);
1355                 *max_launderp -= count;
1356
1357                 /*
1358                  * Clean consumed our busy bit; (m) is no longer accessible
1359                  */
1360                 if (vp != NULL)
1361                         vput(vp);
1362         } else {
1363                 vm_page_wakeup(m);
1364         }
1365         return count;
1366 }
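
/*
 * Illustration: the OBJT_VNODE path above temporarily unbusies (m)
 * (keeping only a hold) so the vnode lock can be acquired first, then
 * re-busies the page and revalidates it.  Below is a minimal userland
 * sketch of that drop-and-revalidate pattern; the structures and the
 * generation counter are invented for the example, and the snippet is
 * not part of the kernel build.
 */
#if 0
#include <pthread.h>
#include <stdbool.h>

struct resource {
        pthread_mutex_t lock;           /* analogue of the page busy bit */
        int             generation;     /* bumped whenever the resource is reused */
};

struct container {
        pthread_mutex_t lock;           /* analogue of the vnode lock */
};

/*
 * Called with r->lock held.  Acquire c->lock without violating the
 * container-before-resource lock order: remember the generation, drop
 * the resource lock, take the container lock, retake the resource lock
 * and then check that the resource was not reused in the meantime.
 * The resource lock is held on return either way; the container lock
 * is held only when true is returned.
 */
static bool
lock_both(struct container *c, struct resource *r)
{
        int gen = r->generation;

        pthread_mutex_unlock(&r->lock);         /* like vm_page_wakeup(m) */
        pthread_mutex_lock(&c->lock);           /* like vget(vp, flags) */
        pthread_mutex_lock(&r->lock);           /* like vm_page_busy_try(m) */
        if (r->generation != gen) {             /* like the requeue/reuse checks */
                pthread_mutex_unlock(&c->lock);
                return false;                   /* caller should skip this one */
        }
        return true;
}
#endif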
1367
1368 static int
1369 vm_pageout_scan_active(int pass, int q,
1370                        int avail_shortage, int inactive_shortage,
1371                        int *recycle_countp)
1372 {
1373         struct vm_page marker;
1374         vm_page_t m;
1375         int actcount;
1376         int delta = 0;
1377         int maxscan;
1378
1379         /*
1380          * We want to move pages from the active queue to the inactive
1381          * queue to get the inactive queue to the inactive target.  If
1382          * we still have a page shortage from above we try to directly free
1383          * clean pages instead of moving them.
1384          *
1385          * If we do still have a shortage we keep track of the number of
1386          * pages we free or cache (recycle_count) as a measure of thrashing
1387          * between the active and inactive queues.
1388          *
1389          * If we were able to completely satisfy the free+cache targets
1390          * from the inactive pool we limit the number of pages we move
1391          * from the active pool to the inactive pool to 2x the pages we
1392          * had removed from the inactive pool (with a minimum of 1/5 the
1393          * inactive target).  If we were not able to completely satisfy
1394          * the free+cache targets we go for the whole target aggressively.
1395          *
1396          * NOTE: Both variables can end up negative.
1397          * NOTE: We are still in a critical section.
1398          */
1399
1400         bzero(&marker, sizeof(marker));
1401         marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
1402         marker.queue = PQ_ACTIVE + q;
1403         marker.pc = q;
1404         marker.wire_count = 1;
1405
1406         vm_page_queues_spin_lock(PQ_ACTIVE + q);
1407         TAILQ_INSERT_HEAD(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1408         maxscan = vm_page_queues[PQ_ACTIVE + q].lcnt;
1409
1410         /*
1411          * Queue locked at top of loop to avoid stack marker issues.
1412          */
1413         while ((m = TAILQ_NEXT(&marker, pageq)) != NULL &&
1414                maxscan-- > 0 && (avail_shortage - delta > 0 ||
1415                                 inactive_shortage > 0))
1416         {
1417                 KKASSERT(m->queue == PQ_ACTIVE + q);
1418                 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl,
1419                              &marker, pageq);
1420                 TAILQ_INSERT_AFTER(&vm_page_queues[PQ_ACTIVE + q].pl, m,
1421                                    &marker, pageq);
1422
1423                 /*
1424                  * Skip marker pages (atomic against other markers to avoid
1425                  * infinite hop-over scans).
1426                  */
1427                 if (m->flags & PG_MARKER)
1428                         continue;
1429
1430                 /*
1431                  * Try to busy the page.  Don't mess with pages which are
1432                  * already busy or reorder them in the queue.
1433                  */
1434                 if (vm_page_busy_try(m, TRUE))
1435                         continue;
1436
1437                 /*
1438                  * Remaining operations run with the page busy and neither
1439                  * the page nor the queue will be spin-locked.
1440                  */
1441                 vm_page_queues_spin_unlock(PQ_ACTIVE + q);
1442                 KKASSERT(m->queue == PQ_ACTIVE + q);
1443
1444                 /*
1445                  * Don't deactivate pages that are held, even if we can
1446                  * busy them.  (XXX why not?)
1447                  */
1448                 if (m->hold_count != 0) {
1449                         vm_page_and_queue_spin_lock(m);
1450                         if (m->queue - m->pc == PQ_ACTIVE) {
1451                                 TAILQ_REMOVE(
1452                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1453                                         m, pageq);
1454                                 TAILQ_INSERT_TAIL(
1455                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1456                                         m, pageq);
1457                         }
1458                         vm_page_and_queue_spin_unlock(m);
1459                         vm_page_wakeup(m);
1460                         goto next;
1461                 }
1462
1463                 /*
1464                  * The count for pagedaemon pages is done after checking the
1465                  * page for eligibility...
1466                  */
1467                 mycpu->gd_cnt.v_pdpages++;
1468
1469                 /*
1470                  * Check to see "how much" the page has been used and clear
1471                  * the tracking access bits.  If the object has no references
1472                  * don't bother paying the expense.
1473                  */
1474                 actcount = 0;
1475                 if (m->object && m->object->ref_count != 0) {
1476                         if (m->flags & PG_REFERENCED)
1477                                 ++actcount;
1478                         actcount += pmap_ts_referenced(m);
1479                         if (actcount) {
1480                                 m->act_count += ACT_ADVANCE + actcount;
1481                                 if (m->act_count > ACT_MAX)
1482                                         m->act_count = ACT_MAX;
1483                         }
1484                 }
1485                 vm_page_flag_clear(m, PG_REFERENCED);
1486
1487                 /*
1488                  * actcount is only valid if the object ref_count is non-zero.
1489                  * If the page does not have an object, actcount will be zero.
1490                  */
1491                 if (actcount && m->object->ref_count != 0) {
1492                         vm_page_and_queue_spin_lock(m);
1493                         if (m->queue - m->pc == PQ_ACTIVE) {
1494                                 TAILQ_REMOVE(
1495                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1496                                         m, pageq);
1497                                 TAILQ_INSERT_TAIL(
1498                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1499                                         m, pageq);
1500                         }
1501                         vm_page_and_queue_spin_unlock(m);
1502                         vm_page_wakeup(m);
1503                 } else {
1504                         switch(m->object->type) {
1505                         case OBJT_DEFAULT:
1506                         case OBJT_SWAP:
1507                                 m->act_count -= min(m->act_count,
1508                                                     vm_anonmem_decline);
1509                                 break;
1510                         default:
1511                                 m->act_count -= min(m->act_count,
1512                                                     vm_filemem_decline);
1513                                 break;
1514                         }
1515                         if (vm_pageout_algorithm ||
1516                             (m->object == NULL) ||
1517                             (m->object && (m->object->ref_count == 0)) ||
1518                             m->act_count < pass + 1
1519                         ) {
1520                                 /*
1521                                  * Deactivate the page.  If we had a
1522                                  * shortage from our inactive scan try to
1523                                  * free (cache) the page instead.
1524                                  *
1525                                  * Don't just blindly cache the page if
1526                                  * we do not have a shortage from the
1527                                  * inactive scan, that could lead to
1528                                  * gigabytes being moved.
1529                                  */
1530                                 --inactive_shortage;
1531                                 if (avail_shortage - delta > 0 ||
1532                                     (m->object && (m->object->ref_count == 0)))
1533                                 {
1534                                         if (avail_shortage - delta > 0)
1535                                                 ++*recycle_countp;
1536                                         vm_page_protect(m, VM_PROT_NONE);
1537                                         if (m->dirty == 0 &&
1538                                             (m->flags & PG_NEED_COMMIT) == 0 &&
1539                                             avail_shortage - delta > 0) {
1540                                                 vm_page_cache(m);
1541                                         } else {
1542                                                 vm_page_deactivate(m);
1543                                                 vm_page_wakeup(m);
1544                                         }
1545                                 } else {
1546                                         vm_page_deactivate(m);
1547                                         vm_page_wakeup(m);
1548                                 }
1549                                 ++delta;
1550                         } else {
1551                                 vm_page_and_queue_spin_lock(m);
1552                                 if (m->queue - m->pc == PQ_ACTIVE) {
1553                                         TAILQ_REMOVE(
1554                                             &vm_page_queues[PQ_ACTIVE + q].pl,
1555                                             m, pageq);
1556                                         TAILQ_INSERT_TAIL(
1557                                             &vm_page_queues[PQ_ACTIVE + q].pl,
1558                                             m, pageq);
1559                                 }
1560                                 vm_page_and_queue_spin_unlock(m);
1561                                 vm_page_wakeup(m);
1562                         }
1563                 }
1564 next:
1565                 lwkt_yield();
1566                 vm_page_queues_spin_lock(PQ_ACTIVE + q);
1567         }
1568
1569         /*
1570          * Clean out our local marker.
1571          *
1572          * Page queue still spin-locked.
1573          */
1574         TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1575         vm_page_queues_spin_unlock(PQ_ACTIVE + q);
1576
1577         return (delta);
1578 }
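
/*
 * Illustration: the scan above ages pages by adding ACT_ADVANCE plus the
 * observed reference count to act_count (capped at ACT_MAX) when a page
 * was referenced, and by declining act_count otherwise, deactivating the
 * page once act_count falls below pass + 1.  The userland sketch below
 * simulates that counter; the constants are stand-ins for the real
 * ACT_ADVANCE/ACT_MAX definitions and for the vm_anonmem_decline /
 * vm_filemem_decline tunables, and the code is not part of the kernel
 * build.
 */
#if 0
#include <stdio.h>

#define SIM_ACT_ADVANCE 3       /* assumed stand-in for ACT_ADVANCE */
#define SIM_ACT_MAX     64      /* assumed stand-in for ACT_MAX */
#define SIM_DECLINE     1       /* assumed per-pass decline */

int
main(void)
{
        int act_count = 0;
        int pass;

        /* Page referenced on passes 0-2, then idle; watch act_count decay. */
        for (pass = 0; pass < 12; ++pass) {
                int referenced = (pass < 3);

                if (referenced) {
                        act_count += SIM_ACT_ADVANCE + 1;
                        if (act_count > SIM_ACT_MAX)
                                act_count = SIM_ACT_MAX;
                } else {
                        act_count -= (act_count < SIM_DECLINE) ?
                            act_count : SIM_DECLINE;
                }
                printf("pass %2d: act_count=%2d -> %s\n", pass, act_count,
                    act_count < pass + 1 ? "deactivate" : "stay active");
        }
        return 0;
}
#endif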
1579
1580 /*
1581  * The number of actually free pages can drop down to v_free_reserved;
1582  * we try to build the free count back up above v_free_min.  Note that
1583  * vm_paging_needed() also returns TRUE if v_free_count is not at
1584  * least v_free_min so that is the minimum we must build the free
1585  * count to.
1586  *
1587  * We use a slightly higher target to improve hysteresis,
1588  * ((v_free_target + v_free_min) / 2).  Since v_free_target
1589  * is usually the same as v_cache_min this maintains about
1590  * half the pages in the free queue as are in the cache queue,
1591  * providing pretty good pipelining for pageout operation.
1592  *
1593  * The system operator can manipulate vm.v_cache_min and
1594  * vm.v_free_target to tune the pageout daemon.  Be sure
1595  * to keep vm.v_free_min < vm.v_free_target.
1596  *
1597  * Note that the original paging target is to get at least
1598  * (free_min + cache_min) into (free + cache).  The slightly
1599  * higher target will shift additional pages from cache to free
1600  * without affecting the original paging target in order to
1601  * maintain better hysteresis and not have the free count always
1602  * be dead-on v_free_min.
1603  *
1604  * NOTE: we are still in a critical section.
1605  *
1606  * Pages moved from PQ_CACHE to totally free are not counted in the
1607  * pages_freed counter.
1608  */
1609 static void
1610 vm_pageout_scan_cache(int avail_shortage, int pass,
1611                       int vnodes_skipped, int recycle_count)
1612 {
1613         static int lastkillticks;
1614         struct vm_pageout_scan_info info;
1615         vm_page_t m;
1616
1617         while (vmstats.v_free_count <
1618                (vmstats.v_free_min + vmstats.v_free_target) / 2) {
1619                 /*
1620                  * This steals some code from vm/vm_page.c
1621                  */
1622                 static int cache_rover = 0;
1623
1624                 m = vm_page_list_find(PQ_CACHE,
1625                                       cache_rover & PQ_L2_MASK, FALSE);
1626                 if (m == NULL)
1627                         break;
1628                 /* page is returned removed from its queue and spinlocked */
1629                 if (vm_page_busy_try(m, TRUE)) {
1630                         vm_page_deactivate_locked(m);
1631                         vm_page_spin_unlock(m);
1632                         continue;
1633                 }
1634                 vm_page_spin_unlock(m);
1635                 pagedaemon_wakeup();
1636                 lwkt_yield();
1637
1638                 /*
1639                  * Remaining operations run with the page busy and neither
1640                  * the page nor the queue will be spin-locked.
1641                  */
1642                 if ((m->flags & (PG_UNMANAGED | PG_NEED_COMMIT)) ||
1643                     m->hold_count ||
1644                     m->wire_count) {
1645                         vm_page_deactivate(m);
1646                         vm_page_wakeup(m);
1647                         continue;
1648                 }
1649                 KKASSERT((m->flags & PG_MAPPED) == 0);
1650                 KKASSERT(m->dirty == 0);
1651                 cache_rover += PQ_PRIME2;
1652                 vm_pageout_page_free(m);
1653                 mycpu->gd_cnt.v_dfree++;
1654         }
1655
1656 #if !defined(NO_SWAPPING)
1657         /*
1658          * Idle process swapout -- run once per second.
1659          */
1660         if (vm_swap_idle_enabled) {
1661                 static time_t lsec;
1662                 if (time_uptime != lsec) {
1663                         atomic_set_int(&vm_pageout_req_swapout, VM_SWAP_IDLE);
1664                         vm_req_vmdaemon();
1665                         lsec = time_uptime;
1666                 }
1667         }
1668 #endif
1669
1670         /*
1671          * If we did not get enough free pages and we have skipped a vnode
1672          * in a writeable object, wake up the sync daemon.  Also kick off
1673          * swapout if we are still short of the paging target.
1674          */
1675         if (vm_paging_target() > 0) {
1676                 if (vnodes_skipped && vm_page_count_min(0))
1677                         speedup_syncer(NULL);
1678 #if !defined(NO_SWAPPING)
1679                 if (vm_swap_enabled && vm_page_count_target()) {
1680                         atomic_set_int(&vm_pageout_req_swapout, VM_SWAP_NORMAL);
1681                         vm_req_vmdaemon();
1682                 }
1683 #endif
1684         }
1685
1686         /*
1687          * Handle catastrophic conditions.  Under good conditions we should
1688          * be at the target, well beyond our minimum.  If we could not even
1689          * reach our minimum the system is under heavy stress.  But just being
1690          * under heavy stress does not trigger process killing.
1691          *
1692          * We consider ourselves to have run out of memory if the swap pager
1693          * is full and avail_shortage is still positive.  The secondary check
1694          * ensures that we do not kill processes if the instantaneous
1695          * availability is good, even if the pageout daemon pass says it
1696          * couldn't get to the target.
1697          */
1698         if (swap_pager_almost_full &&
1699             pass > 0 &&
1700             (vm_page_count_min(recycle_count) || avail_shortage > 0)) {
1701                 kprintf("Warning: system low on memory+swap "
1702                         "shortage %d for %d ticks!\n",
1703                         avail_shortage, ticks - swap_fail_ticks);
1704         }
1705         if (swap_pager_full &&
1706             pass > 1 &&
1707             avail_shortage > 0 &&
1708             vm_paging_target() > 0 &&
1709             (unsigned int)(ticks - lastkillticks) >= hz) {
1710                 /*
1711                  * Kill something, maximum rate once per second to give
1712                  * the process time to free up sufficient memory.
1713                  */
1714                 lastkillticks = ticks;
1715                 info.bigproc = NULL;
1716                 info.bigsize = 0;
1717                 allproc_scan(vm_pageout_scan_callback, &info);
1718                 if (info.bigproc != NULL) {
1719                         info.bigproc->p_nice = PRIO_MIN;
1720                         info.bigproc->p_usched->resetpriority(
1721                                 FIRST_LWP_IN_PROC(info.bigproc));
1722                         atomic_set_int(&info.bigproc->p_flags, P_LOWMEMKILL);
1723                         killproc(info.bigproc, "out of swap space");
1724                         wakeup(&vmstats.v_free_count);
1725                         PRELE(info.bigproc);
1726                 }
1727         }
1728 }
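
/*
 * Worked example of the hysteresis target described above: the
 * CACHE->FREE loop in vm_pageout_scan_cache() refills the free count up
 * to (v_free_min + v_free_target) / 2.  The values are hypothetical and
 * the snippet is not part of the kernel build.
 */
#if 0
#include <stdio.h>

int
main(void)
{
        /* Hypothetical thresholds for a mid-sized machine. */
        int v_free_min = 5000;
        int v_free_target = 25000;

        /* The free count is rebuilt to the midpoint, not all the way to the target. */
        int fill_to = (v_free_min + v_free_target) / 2;

        printf("CACHE->FREE refills the free count up to %d pages\n", fill_to);
        return 0;
}
#endif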
1729
1730 static int
1731 vm_pageout_scan_callback(struct proc *p, void *data)
1732 {
1733         struct vm_pageout_scan_info *info = data;
1734         vm_offset_t size;
1735
1736         /*
1737          * Never kill system processes or init.  If we have configured swap
1738          * then try to avoid killing low-numbered pids.
1739          */
1740         if ((p->p_flags & P_SYSTEM) || (p->p_pid == 1) ||
1741             ((p->p_pid < 48) && (vm_swap_size != 0))) {
1742                 return (0);
1743         }
1744
1745         lwkt_gettoken(&p->p_token);
1746
1747         /*
1748          * If the process is in a non-running state, don't
1749          * touch it.
1750          */
1751         if (p->p_stat != SACTIVE && p->p_stat != SSTOP && p->p_stat != SCORE) {
1752                 lwkt_reltoken(&p->p_token);
1753                 return (0);
1754         }
1755
1756         /*
1757          * Get the approximate process size.  Note that anonymous pages
1758          * with backing swap will be counted twice, but there should not
1759          * be too many such pages due to the stress the VM system is
1760          * under at this point.
1761          */
1762         size = vmspace_anonymous_count(p->p_vmspace) +
1763                 vmspace_swap_count(p->p_vmspace);
1764
1765         /*
1766          * If this process is bigger than the biggest one so far,
1767          * remember it.
1768          */
1769         if (info->bigsize < size) {
1770                 if (info->bigproc)
1771                         PRELE(info->bigproc);
1772                 PHOLD(p);
1773                 info->bigproc = p;
1774                 info->bigsize = size;
1775         }
1776         lwkt_reltoken(&p->p_token);
1777         lwkt_yield();
1778
1779         return(0);
1780 }
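
/*
 * Illustration of the victim selection done by vm_pageout_scan_callback():
 * the process with the largest anonymous + swap-backed footprint wins.
 * The skip rules (system processes, init, low pids with swap configured)
 * are omitted, the data is made up, and the snippet is not part of the
 * kernel build.
 */
#if 0
#include <stddef.h>
#include <stdio.h>

struct candidate {
        int     pid;
        size_t  size;           /* anonymous + swap-backed pages */
};

/* Remember the biggest candidate, as the allproc_scan() callback does. */
static const struct candidate *
pick_biggest(const struct candidate *c, int n)
{
        const struct candidate *big = NULL;
        int i;

        for (i = 0; i < n; ++i) {
                if (big == NULL || c[i].size > big->size)
                        big = &c[i];
        }
        return big;
}

int
main(void)
{
        struct candidate procs[] = {
                { 100, 12000 }, { 200, 480000 }, { 300, 96000 },
        };
        const struct candidate *victim =
            pick_biggest(procs, sizeof(procs) / sizeof(procs[0]));

        printf("would kill pid %d (%zu pages)\n", victim->pid, victim->size);
        return 0;
}
#endif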
1781
1782 /*
1783  * This routine tries to maintain the pseudo-LRU active queue so that
1784  * some statistics accumulation still occurs during long periods in
1785  * which there is no paging.  This helps the situation where paging
1786  * just starts to occur.
1787  */
1788 static void
1789 vm_pageout_page_stats(int q)
1790 {
1791         static int fullintervalcount = 0;
1792         struct vm_page marker;
1793         vm_page_t m;
1794         int pcount, tpcount;            /* Number of pages to check */
1795         int page_shortage;
1796
1797         page_shortage = (vmstats.v_inactive_target + vmstats.v_cache_max +
1798                          vmstats.v_free_min) -
1799                         (vmstats.v_free_count + vmstats.v_inactive_count +
1800                          vmstats.v_cache_count);
1801
1802         if (page_shortage <= 0)
1803                 return;
1804
1805         pcount = vm_page_queues[PQ_ACTIVE + q].lcnt;
1806         fullintervalcount += vm_pageout_stats_interval;
1807         if (fullintervalcount < vm_pageout_full_stats_interval) {
1808                 tpcount = (vm_pageout_stats_max * pcount) /
1809                           vmstats.v_page_count + 1;
1810                 if (pcount > tpcount)
1811                         pcount = tpcount;
1812         } else {
1813                 fullintervalcount = 0;
1814         }
1815
1816         bzero(&marker, sizeof(marker));
1817         marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
1818         marker.queue = PQ_ACTIVE + q;
1819         marker.pc = q;
1820         marker.wire_count = 1;
1821
1822         vm_page_queues_spin_lock(PQ_ACTIVE + q);
1823         TAILQ_INSERT_HEAD(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1824
1825         /*
1826          * Queue locked at top of loop to avoid stack marker issues.
1827          */
1828         while ((m = TAILQ_NEXT(&marker, pageq)) != NULL &&
1829                pcount-- > 0)
1830         {
1831                 int actcount;
1832
1833                 KKASSERT(m->queue == PQ_ACTIVE + q);
1834                 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1835                 TAILQ_INSERT_AFTER(&vm_page_queues[PQ_ACTIVE + q].pl, m,
1836                                    &marker, pageq);
1837
1838                 /*
1839                  * Skip marker pages (atomic against other markers to avoid
1840                  * infinite hop-over scans).
1841                  */
1842                 if (m->flags & PG_MARKER)
1843                         continue;
1844
1845                 /*
1846                  * Ignore pages we can't busy
1847                  */
1848                 if (vm_page_busy_try(m, TRUE))
1849                         continue;
1850
1851                 /*
1852                  * Remaining operations run with the page busy and neither
1853                  * the page or the queue will be spin-locked.
1854                  * the page nor the queue will be spin-locked.
1855                 vm_page_queues_spin_unlock(PQ_ACTIVE + q);
1856                 KKASSERT(m->queue == PQ_ACTIVE + q);
1857
1858                 /*
1859                  * We now have a safely busied page; the page and queue
1860                  * spinlocks have been released.
1861                  *
1862                  * Ignore held pages
1863                  */
1864                 if (m->hold_count) {
1865                         vm_page_wakeup(m);
1866                         goto next;
1867                 }
1868
1869                 /*
1870                  * Calculate activity
1871                  */
1872                 actcount = 0;
1873                 if (m->flags & PG_REFERENCED) {
1874                         vm_page_flag_clear(m, PG_REFERENCED);
1875                         actcount += 1;
1876                 }
1877                 actcount += pmap_ts_referenced(m);
1878
1879                 /*
1880                  * Update act_count and move page to end of queue.
1881                  */
1882                 if (actcount) {
1883                         m->act_count += ACT_ADVANCE + actcount;
1884                         if (m->act_count > ACT_MAX)
1885                                 m->act_count = ACT_MAX;
1886                         vm_page_and_queue_spin_lock(m);
1887                         if (m->queue - m->pc == PQ_ACTIVE) {
1888                                 TAILQ_REMOVE(
1889                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1890                                         m, pageq);
1891                                 TAILQ_INSERT_TAIL(
1892                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1893                                         m, pageq);
1894                         }
1895                         vm_page_and_queue_spin_unlock(m);
1896                         vm_page_wakeup(m);
1897                         goto next;
1898                 }
1899
1900                 if (m->act_count == 0) {
1901                         /*
1902                          * We turn off page access, so that we have
1903                          * more accurate RSS stats.  We don't do this
1904                          * in the normal page deactivation when the
1905                          * system is loaded VM wise, because the
1906                          * cost of the large number of page protect
1907                          * operations would be higher than the value
1908                          * of doing the operation.
1909                          *
1910                          * We use the marker to save our place so
1911                          * we can release the spin lock.  Both (m) and
1912                          * the page after it may become invalid.
1913                          */
1914                         vm_page_protect(m, VM_PROT_NONE);
1915                         vm_page_deactivate(m);
1916                 } else {
1917                         m->act_count -= min(m->act_count, ACT_DECLINE);
1918                         vm_page_and_queue_spin_lock(m);
1919                         if (m->queue - m->pc == PQ_ACTIVE) {
1920                                 TAILQ_REMOVE(
1921                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1922                                         m, pageq);
1923                                 TAILQ_INSERT_TAIL(
1924                                         &vm_page_queues[PQ_ACTIVE + q].pl,
1925                                         m, pageq);
1926                         }
1927                         vm_page_and_queue_spin_unlock(m);
1928                 }
1929                 vm_page_wakeup(m);
1930 next:
1931                 vm_page_queues_spin_lock(PQ_ACTIVE + q);
1932         }
1933
1934         /*
1935          * Remove our local marker
1936          *
1937          * Page queue still spin-locked.
1938          */
1939         TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1940         vm_page_queues_spin_unlock(PQ_ACTIVE + q);
1941 }
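
/*
 * Worked example of the partial-scan sizing used above: between full
 * stats intervals only (vm_pageout_stats_max * pcount) / v_page_count + 1
 * pages of the active queue are examined, bounding the per-interval cost.
 * The numbers are hypothetical and the snippet is not part of the kernel
 * build.
 */
#if 0
#include <stdio.h>

int
main(void)
{
        long v_page_count = 1048576;    /* total pages (hypothetical) */
        long pcount = 300000;           /* pages on this PQ_ACTIVE queue */
        long stats_max = 20000;         /* vm_pageout_stats_max (hypothetical) */

        long tpcount = (stats_max * pcount) / v_page_count + 1;

        printf("partial stats pass scans %ld of %ld queued pages\n",
            tpcount < pcount ? tpcount : pcount, pcount);
        return 0;
}
#endif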
1942
1943 static int
1944 vm_pageout_free_page_calc(vm_size_t count)
1945 {
1946         if (count < vmstats.v_page_count)
1947                 return 0;
1948         /*
1949          * free_reserved needs to include enough for the largest swap pager
1950          * structures plus enough for any pv_entry structs when paging.
1951          *
1952          * v_free_min           normal allocations
1953          * v_free_reserved      system allocations
1954          * v_pageout_free_min   allocations by pageout daemon
1955          * v_interrupt_free_min low level allocations (e.g. swap structures)
1956          */
1957         if (vmstats.v_page_count > 1024)
1958                 vmstats.v_free_min = 64 + (vmstats.v_page_count - 1024) / 200;
1959         else
1960                 vmstats.v_free_min = 64;
1961         vmstats.v_free_reserved = vmstats.v_free_min * 4 / 8 + 7;
1962         vmstats.v_free_severe = vmstats.v_free_min * 4 / 8 + 0;
1963         vmstats.v_pageout_free_min = vmstats.v_free_min * 2 / 8 + 7;
1964         vmstats.v_interrupt_free_min = vmstats.v_free_min * 1 / 8 + 7;
1965
1966         return 1;
1967 }
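
/*
 * Worked example of vm_pageout_free_page_calc() for a hypothetical
 * machine with 1048576 pages (about 4GB with 4K pages).  The snippet
 * simply mirrors the formulas above and is not part of the kernel build.
 */
#if 0
#include <stdio.h>

int
main(void)
{
        int v_page_count = 1048576;     /* hypothetical */
        int v_free_min;

        if (v_page_count > 1024)
                v_free_min = 64 + (v_page_count - 1024) / 200;
        else
                v_free_min = 64;

        printf("v_free_min           = %d\n", v_free_min);
        printf("v_free_reserved      = %d\n", v_free_min * 4 / 8 + 7);
        printf("v_free_severe        = %d\n", v_free_min * 4 / 8 + 0);
        printf("v_pageout_free_min   = %d\n", v_free_min * 2 / 8 + 7);
        printf("v_interrupt_free_min = %d\n", v_free_min * 1 / 8 + 7);
        return 0;
}
#endif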
1968
1969
1970 /*
1971  * vm_pageout is the high level pageout daemon.
1972  *
1973  * No requirements.
1974  */
1975 static void
1976 vm_pageout_thread(void)
1977 {
1978         int pass;
1979         int q;
1980         int q1iterator = 0;
1981         int q2iterator = 0;
1982
1983         /*
1984          * Initialize some paging parameters.
1985          */
1986         curthread->td_flags |= TDF_SYSTHREAD;
1987
1988         vm_pageout_free_page_calc(vmstats.v_page_count);
1989
1990         /*
1991          * v_free_target and v_cache_min control pageout hysteresis.  Note
1992          * that these are more a measure of the VM cache queue hysteresis
1993          * then the VM free queue.  Specifically, v_free_target is the
1994          * than the VM free queue.  Specifically, v_free_target is the
1995          *
1996          * v_free_reserved + v_cache_min (mostly means v_cache_min) is the
1997          * low water mark, while v_free_min is the stop.  v_cache_min must
1998          * be big enough to handle memory needs while the pageout daemon
1999          * is signalled and run to free more pages.
2000          */
2001         if (vmstats.v_free_count > 6144)
2002                 vmstats.v_free_target = 4 * vmstats.v_free_min + vmstats.v_free_reserved;
2003         else
2004                 vmstats.v_free_target = 2 * vmstats.v_free_min + vmstats.v_free_reserved;
2005
2006         /*
2007          * NOTE: With the new buffer cache b_act_count we want the default
2008          *       inactive target to be a percentage of available memory.
2009          *
2010          *       The inactive target essentially determines the minimum
2011          *       number of 'temporary' pages capable of caching one-time-use
2012          *       files when the VM system is otherwise full of pages
2013          *       belonging to multi-time-use files or active program data.
2014          *
2015          * NOTE: The inactive target is aggressively pursued only if the
2016          *       inactive queue becomes too small.  If the inactive queue
2017          *       is large enough to satisfy page movement to free+cache
2018          *       then it is repopulated more slowly from the active queue.
2019          *       This allows a general inactive_target default to be set.
2020          *
2021          *       There is an issue here for processes which sit mostly idle
2022          *       'overnight', such as sshd, tcsh, and X.  Any movement from
2023          *       the active queue will eventually cause such pages to be
2024          *       recycled, causing a lot of paging in the morning.
2025          *       To reduce the incidence of this, pages cycled out of the
2026          *       buffer cache are moved directly to the inactive queue if
2027          *       they were only used once or twice.
2028          *
2029          *       The vfs.vm_cycle_point sysctl can be used to adjust this.
2030          *       Increasing the value (up to 64) increases the number of
2031          *       buffer recyclements which go directly to the inactive queue.
2032          */
2033         if (vmstats.v_free_count > 2048) {
2034                 vmstats.v_cache_min = vmstats.v_free_target;
2035                 vmstats.v_cache_max = 2 * vmstats.v_cache_min;
2036         } else {
2037                 vmstats.v_cache_min = 0;
2038                 vmstats.v_cache_max = 0;
2039         }
2040         vmstats.v_inactive_target = vmstats.v_free_count / 4;
2041
2042         /* XXX does not really belong here */
2043         if (vm_page_max_wired == 0)
2044                 vm_page_max_wired = vmstats.v_free_count / 3;
2045
2046         if (vm_pageout_stats_max == 0)
2047                 vm_pageout_stats_max = vmstats.v_free_target;
2048
2049         /*
2050          * Set interval in seconds for stats scan.
2051          */
2052         if (vm_pageout_stats_interval == 0)
2053                 vm_pageout_stats_interval = 5;
2054         if (vm_pageout_full_stats_interval == 0)
2055                 vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;
2056
2057
2058         /*
2059          * Set maximum free per pass
2060          */
2061         if (vm_pageout_stats_free_max == 0)
2062                 vm_pageout_stats_free_max = 5;
2063
2064         swap_pager_swap_init();
2065         pass = 0;
2066
2067         /*
2068          * The pageout daemon is never done, so loop forever.
2069          */
2070         while (TRUE) {
2071                 int error;
2072                 int avail_shortage;
2073                 int inactive_shortage;
2074                 int vnodes_skipped = 0;
2075                 int recycle_count = 0;
2076                 int tmp;
2077
2078                 /*
2079                  * Wait for an action request.  If we time out, check to
2080                  * see if paging is needed (in case the normal wakeup
2081                  * code raced us).
2082                  */
2083                 if (vm_pages_needed == 0) {
2084                         error = tsleep(&vm_pages_needed,
2085                                        0, "psleep",
2086                                        vm_pageout_stats_interval * hz);
2087                         if (error &&
2088                             vm_paging_needed() == 0 &&
2089                             vm_pages_needed == 0) {
2090                                 for (q = 0; q < PQ_L2_SIZE; ++q)
2091                                         vm_pageout_page_stats(q);
2092                                 continue;
2093                         }
2094                         vm_pages_needed = 1;
2095                 }
2096
2097                 mycpu->gd_cnt.v_pdwakeups++;
2098
2099                 /*
2100                  * Scan for INACTIVE->CLEAN/PAGEOUT
2101                  *
2102                  * This routine tries to avoid thrashing the system with
2103                  * unnecessary activity.
2104                  *
2105                  * Calculate our target for the number of free+cache pages we
2106                  * want to get to.  This is higher than the number that causes
2107                  * allocations to stall (severe) in order to provide hysteresis,
2108                  * and if we don't make it all the way but get to the minimum
2109                  * we're happy.  Goose it a bit if there are multiple requests
2110                  * for memory.
2111                  *
2112                  * Don't reduce avail_shortage inside the loop or the
2113                  * PQAVERAGE() calculation will break.
2114                  *
2115                  * NOTE! deficit is differentiated from avail_shortage as
2116                  *       REQUIRING at least (deficit) pages to be cleaned,
2117                  *       even if the page queues are in good shape.  This
2118                  *       is used primarily for handling per-process
2119                  *       RLIMIT_RSS and may also see small values when
2120                  *       processes block due to low memory.
2121                  */
2122                 avail_shortage = vm_paging_target() + vm_pageout_deficit;
2123                 vm_pageout_deficit = 0;
2124
2125                 if (avail_shortage > 0) {
2126                         int delta = 0;
2127
2128                         for (q = 0; q < PQ_L2_SIZE; ++q) {
2129                                 delta += vm_pageout_scan_inactive(
2130                                             pass,
2131                                             (q + q1iterator) & PQ_L2_MASK,
2132                                             PQAVERAGE(avail_shortage),
2133                                             &vnodes_skipped);
2134                                 if (avail_shortage - delta <= 0)
2135                                         break;
2136                         }
2137                         avail_shortage -= delta;
2138                         q1iterator = q + 1;
2139                 }
2140
2141                 /*
2142                  * Figure out how many active pages we must deactivate.  If
2143                  * we were able to reach our target with just the inactive
2144                  * scan above we limit the number of active pages we
2145                  * deactivate to reduce unnecessary work.
2146                  */
2147                 inactive_shortage = vmstats.v_inactive_target -
2148                                     vmstats.v_inactive_count;
2149
2150                 /*
2151                  * If we were unable to free sufficient inactive pages to
2152                  * satisfy the free/cache queue requirements then simply
2153                  * reaching the inactive target may not be good enough.
2154                  * Try to deactivate pages in excess of the target based
2155                  * on the shortfall.
2156                  *
2157          * However, to prevent thrashing the VM system, do not
2158          * deactivate more than an additional 1/10 of the inactive
2159                  * target's worth of active pages.
2160                  */
2161                 if (avail_shortage > 0) {
2162                         tmp = avail_shortage * 2;
2163                         if (tmp > vmstats.v_inactive_target / 10)
2164                                 tmp = vmstats.v_inactive_target / 10;
2165                         inactive_shortage += tmp;
2166                 }
2167
2168                 /*
2169                  * Only trigger a pmap cleanup on inactive shortage.
2170                  */
2171                 if (inactive_shortage > 0) {
2172                         pmap_collect();
2173                 }
2174
2175                 /*
2176                  * Scan for ACTIVE->INACTIVE
2177                  *
2178                  * Only trigger on inactive shortage.  Triggering on
2179                  * avail_shortage can starve the active queue with
2180                  * unnecessary active->inactive transitions and destroy
2181                  * performance.
2182                  */
2183                 if (/*avail_shortage > 0 ||*/ inactive_shortage > 0) {
2184                         int delta = 0;
2185
2186                         for (q = 0; q < PQ_L2_SIZE; ++q) {
2187                                 delta += vm_pageout_scan_active(
2188                                                 pass,
2189                                                 (q + q2iterator) & PQ_L2_MASK,
2190                                                 PQAVERAGE(avail_shortage),
2191                                                 PQAVERAGE(inactive_shortage),
2192                                                 &recycle_count);
2193                                 if (inactive_shortage - delta <= 0 &&
2194                                     avail_shortage - delta <= 0) {
2195                                         break;
2196                                 }
2197                         }
2198                         inactive_shortage -= delta;
2199                         avail_shortage -= delta;
2200                         q2iterator = q + 1;
2201                 }
2202
2203                 /*
2204                  * Scan for CACHE->FREE
2205                  *
2206                  * Finally free enough cache pages to meet our free page
2207                  * requirement and take more drastic measures if we are
2208                  * still in trouble.
2209                  */
2210                 vm_pageout_scan_cache(avail_shortage, pass,
2211                                       vnodes_skipped, recycle_count);
2212
2213                 /*
2214                  * Wait for more work.
2215                  */
2216                 if (avail_shortage > 0) {
2217                         ++pass;
2218                         if (pass < 10 && vm_pages_needed > 1) {
2219                                 /*
2220                                  * Normal operation, additional processes
2221                                  * have already kicked us.  Retry immediately
2222                                  * unless swap space is completely full in
2223                                  * which case delay a bit.
2224                                  */
2225                                 if (swap_pager_full) {
2226                                         tsleep(&vm_pages_needed, 0, "pdelay",
2227                                                 hz / 5);
2228                                 } /* else immediate retry */
2229                         } else if (pass < 10) {
2230                                 /*
2231                                  * Normal operation, fewer processes.  Delay
2232                                  * a bit but allow wakeups.
2233                                  */
2234                                 vm_pages_needed = 0;
2235                                 tsleep(&vm_pages_needed, 0, "pdelay", hz / 10);
2236                                 vm_pages_needed = 1;
2237                         } else if (swap_pager_full == 0) {
2238                                 /*
2239                                  * We've taken too many passes, forced delay.
2240                                  */
2241                                 tsleep(&vm_pages_needed, 0, "pdelay", hz / 10);
2242                         } else {
2243                                 /*
2244                                  * Running out of memory, catastrophic
2245                                  * back-off to one-second intervals.
2246                                  */
2247                                 tsleep(&vm_pages_needed, 0, "pdelay", hz);
2248                         }
2249                 } else if (vm_pages_needed) {
2250                         /*
2251                          * Interlocked wakeup of waiters (non-optional).
2252                          *
2253                          * Similar to vm_page_free_wakeup() in vm_page.c,
2254                          * wake up anyone sleeping on v_free_count.
2255                          */
2256                         pass = 0;
2257                         if (!vm_page_count_min(vm_page_free_hysteresis) ||
2258                             !vm_page_count_target()) {
2259                                 vm_pages_needed = 0;
2260                                 wakeup(&vmstats.v_free_count);
2261                         }
2262                 } else {
2263                         pass = 0;
2264                 }
2265         }
2266 }
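
/*
 * Sketch of the back-off schedule at the bottom of the pageout loop,
 * expressed as a standalone function: near-immediate retries while other
 * processes are waiting and swap is usable, short delays otherwise, and
 * one-second sleeps once swap is full after many failed passes.  SIM_HZ
 * and the parameter names are invented for the example ("others_waiting"
 * stands in for vm_pages_needed > 1); not part of the kernel build.
 */
#if 0
#include <stdio.h>

#define SIM_HZ  100     /* assumed ticks per second */

static int
pageout_backoff(int pass, int swap_full, int others_waiting)
{
        if (pass < 10 && others_waiting)
                return swap_full ? SIM_HZ / 5 : 0;      /* retry (almost) at once */
        if (pass < 10)
                return SIM_HZ / 10;                     /* normal short delay */
        if (!swap_full)
                return SIM_HZ / 10;                     /* too many passes, forced delay */
        return SIM_HZ;                                  /* catastrophic back-off */
}

int
main(void)
{
        printf("pass 2, swap ok, waiters: sleep %d ticks\n",
            pageout_backoff(2, 0, 1));
        printf("pass 12, swap full:       sleep %d ticks\n",
            pageout_backoff(12, 1, 0));
        return 0;
}
#endif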
2267
2268 static struct kproc_desc page_kp = {
2269         "pagedaemon",
2270         vm_pageout_thread,
2271         &pagethread
2272 };
2273 SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp);
2274
2275
2276 /*
2277  * Called after allocating a page out of the cache or free queue
2278  * to possibly wake the pagedaemon up to replenish our supply.
2279  *
2280  * We try to generate some hysteresis by waking the pagedaemon up
2281  * when our free+cache pages go below the free_min+cache_min level.
2282  * The pagedaemon tries to get the count back up to at least the
2283  * minimum, and through to the target level if possible.
2284  *
2285  * If the pagedaemon is already active bump vm_pages_needed as a hint
2286  * that there are even more requests pending.
2287  *
2288  * SMP races ok?
2289  * No requirements.
2290  */
2291 void
2292 pagedaemon_wakeup(void)
2293 {
2294         if (vm_paging_needed() && curthread != pagethread) {
2295                 if (vm_pages_needed == 0) {
2296                         vm_pages_needed = 1;    /* SMP race ok */
2297                         wakeup(&vm_pages_needed);
2298                 } else if (vm_page_count_min(0)) {
2299                         ++vm_pages_needed;      /* SMP race ok */
2300                 }
2301         }
2302 }
2303
2304 #if !defined(NO_SWAPPING)
2305
2306 /*
2307  * SMP races ok?
2308  * No requirements.
2309  */
2310 static void
2311 vm_req_vmdaemon(void)
2312 {
2313         static int lastrun = 0;
2314
2315         if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
2316                 wakeup(&vm_daemon_needed);
2317                 lastrun = ticks;
2318         }
2319 }
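
/*
 * Userland analogue of the once-per-second throttle in vm_req_vmdaemon(),
 * using wall-clock seconds instead of the kernel ticks counter (which
 * also makes the wrap-around check unnecessary).  Illustrative only; not
 * part of the kernel build.
 */
#if 0
#include <stdio.h>
#include <time.h>

/* Return 1 at most once per second no matter how often it is called. */
static int
should_run(void)
{
        static time_t lastrun;
        time_t now = time(NULL);

        if (now > lastrun) {
                lastrun = now;
                return 1;
        }
        return 0;
}

int
main(void)
{
        int i, fired = 0;

        for (i = 0; i < 1000000; ++i)
                fired += should_run();
        printf("fired %d time(s) during a tight loop\n", fired);
        return 0;
}
#endif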
2320
2321 static int vm_daemon_callback(struct proc *p, void *data __unused);
2322
2323 /*
2324  * No requirements.
2325  */
2326 static void
2327 vm_daemon(void)
2328 {
2329         int req_swapout;
2330
2331         while (TRUE) {
2332                 tsleep(&vm_daemon_needed, 0, "psleep", 0);
2333                 req_swapout = atomic_swap_int(&vm_pageout_req_swapout, 0);
2334
2335                 /*
2336                  * forced swapouts
2337                  */
2338                 if (req_swapout)
2339                         swapout_procs(vm_pageout_req_swapout);
2340
2341                 /*
2342                  * Scan the processes for any that exceed their RSS rlimit
2343                  * or are swapped out, and deactivate some of their pages.
2344                  */
2345                 allproc_scan(vm_daemon_callback, NULL);
2346         }
2347 }
2348
2349 static int
2350 vm_daemon_callback(struct proc *p, void *data __unused)
2351 {
2352         struct vmspace *vm;
2353         vm_pindex_t limit, size;
2354
2355         /*
2356          * If this is a system process, or it is already exiting,
2357          * skip it.
2358          */
2359         lwkt_gettoken(&p->p_token);
2360
2361         if (p->p_flags & (P_SYSTEM | P_WEXIT)) {
2362                 lwkt_reltoken(&p->p_token);
2363                 return (0);
2364         }
2365
2366         /*
2367          * If the process is in a non-running state, don't
2368          * touch it.
2369          */
2370         if (p->p_stat != SACTIVE && p->p_stat != SSTOP && p->p_stat != SCORE) {
2371                 lwkt_reltoken(&p->p_token);
2372                 return (0);
2373         }
2374
2375         /*
2376          * get a limit
2377          */
2378         limit = OFF_TO_IDX(qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
2379                                 p->p_rlimit[RLIMIT_RSS].rlim_max));
2380
2381         /*
2382          * let processes that are swapped out really be
2383          * swapped out.  Set the limit to nothing to get as
2384          * many pages out to swap as possible.
2385          */
2386         if (p->p_flags & P_SWAPPEDOUT)
2387                 limit = 0;
2388
2389         vm = p->p_vmspace;
2390         vmspace_hold(vm);
2391         size = pmap_resident_tlnw_count(&vm->vm_pmap);
2392         if (size >= limit) {
2393                 vm_pageout_map_deactivate_pages(&vm->vm_map, limit);
2394         }
2395         vmspace_drop(vm);
2396
2397         lwkt_reltoken(&p->p_token);
2398
2399         return (0);
2400 }
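
/*
 * Userland view of the limit enforced by vm_daemon_callback(): a process
 * can cap its resident set with setrlimit(RLIMIT_RSS) and the VM daemon
 * will deactivate pages once the RSS exceeds that cap.  The sizes below
 * are arbitrary; the snippet is not part of the kernel build.
 */
#if 0
#include <sys/resource.h>

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
        struct rlimit rl;

        rl.rlim_cur = 64UL * 1024 * 1024;       /* 64MB soft RSS limit */
        rl.rlim_max = 128UL * 1024 * 1024;      /* 128MB hard RSS limit */
        if (setrlimit(RLIMIT_RSS, &rl) != 0) {
                perror("setrlimit(RLIMIT_RSS)");
                exit(1);
        }

        if (getrlimit(RLIMIT_RSS, &rl) == 0) {
                printf("RSS limit: cur=%ju max=%ju bytes\n",
                    (uintmax_t)rl.rlim_cur, (uintmax_t)rl.rlim_max);
        }
        return 0;
}
#endif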
2401
2402 #endif