From 7ab26241a9129d73605c7fad365769264ed06366 Mon Sep 17 00:00:00 2001 From: Matthew Dillon Date: Tue, 31 Jan 2012 20:52:46 -0800 Subject: [PATCH] kernel - Improve pageout daemon & memory pressure detection w/tmpfs * When PG_NEED_COMMIT pages are unwired from the buffer cache they are now unconditionally placed on the active queue, even if 'activate' is not set. * This results in non-freeable tmpfs pages remaining in the active queue most of the time. They will be cycled into the inactive queue and flushed out if swap is present as per normal pageout daemon operation. When swap is not present or is full, these pages are reactivated unconditionally. --- sys/vm/vm_page.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c index 8156a762f7..90e5f6edc0 100644 --- a/sys/vm/vm_page.c +++ b/sys/vm/vm_page.c @@ -2154,6 +2154,10 @@ vm_page_wire(vm_page_t m) * processes. This optimization causes one-time-use metadata to be * reused more quickly. * + * Pages marked PG_NEED_COMMIT are always activated and never placed on + * the inactive queue. This helps the pageout daemon determine memory + * pressure and act on out-of-memory situations more quickly. + * * BUT, if we are in a low-memory situation we have no choice but to * put clean pages on the cache queue. * @@ -2178,7 +2182,7 @@ vm_page_unwire(vm_page_t m, int activate) atomic_add_int(&vmstats.v_wire_count, -1); if (m->flags & PG_UNMANAGED) { ; - } else if (activate || (m->flags & PG_NEED_COMMIT)) { vm_page_spin_lock(m); _vm_page_add_queue_spinlocked(m, PQ_ACTIVE + m->pc, 0); -- 2.41.0