From 4ecf7cc9c0bfba854db12b3d9cc529b0e705b65e Mon Sep 17 00:00:00 2001
From: Matthew Dillon
Date: Tue, 1 Jul 2008 02:02:56 +0000
Subject: [PATCH] Fix numerous pageout daemon -> buffer cache deadlocks in the
 main system.

These issues usually only occur on systems with small amounts of RAM,
but it is possible to trigger them on any system.

* Get rid of the IO_NOBWILL hack.  Just have the VN device use IO_DIRECT,
  which will clean out the buffer on completion of the write.

* Add a timeout argument to vm_wait().

* Add a thread->td_flags flag called TDF_SYSTHREAD.  kmalloc() calls made
  from designated threads are allowed to dip into the system reserve when
  allocating pages.  Only the pageout daemon and buf_daemon[_hw] use the
  flag.

* Add a new static procedure, recoverbufpages(), which explicitly tries
  to free buffers and their backing pages on the clean queue.

* Add a new static procedure, bio_page_alloc(), to do all the nasty work
  of allocating a page on behalf of a buffer cache buffer.

  This function will call vm_page_alloc() with VM_ALLOC_SYSTEM to allow
  it to dip into the system reserve.  If the allocation fails, this
  function will call recoverbufpages() to try to recycle VM pages from
  clean buffer cache buffers, and will then attempt to reallocate using
  VM_ALLOC_SYSTEM | VM_ALLOC_INTERRUPT to allow it to dip into the
  interrupt reserve as well.  Warnings will blare on the console.  If
  the effort still fails we sleep for 1/20 of a second and retry.  The
  idea, though, is for all of the above effort to keep the allocation
  from ultimately failing.

Reported-by: Gergo Szakal
---
 sys/dev/disk/vn/vn.c      |   4 +-
 sys/kern/kern_slaballoc.c |   8 +-
 sys/kern/uipc_syscalls.c  |   4 +-
 sys/kern/vfs_bio.c        | 231 +++++++++++++++++++++++++++++++++-----
 sys/sys/thread.h          |   4 +-
 sys/sys/vnode.h           |   8 +-
 sys/vm/swap_pager.c       |   4 +-
 sys/vm/vm_fault.c         |   4 +-
 sys/vm/vm_glue.c          |   6 +-
 sys/vm/vm_page.c          |  15 +--
 sys/vm/vm_pageout.c       |   3 +-
 sys/vm/vm_pageout.h       |   4 +-
 12 files changed, 234 insertions(+), 61 deletions(-)

diff --git a/sys/dev/disk/vn/vn.c b/sys/dev/disk/vn/vn.c
index e253eff27b..1a35ec0d16 100644
--- a/sys/dev/disk/vn/vn.c
+++ b/sys/dev/disk/vn/vn.c
@@ -39,7 +39,7 @@
  *
  * from: @(#)vn.c	8.6 (Berkeley) 4/1/94
  * $FreeBSD: src/sys/dev/vn/vn.c,v 1.105.2.4 2001/11/18 07:11:00 dillon Exp $
- * $DragonFly: src/sys/dev/disk/vn/vn.c,v 1.37 2008/04/22 18:46:50 dillon Exp $
+ * $DragonFly: src/sys/dev/disk/vn/vn.c,v 1.38 2008/07/01 02:02:53 dillon Exp $
  */

 /*
@@ -393,7 +393,7 @@ vnstrategy(struct dev_strategy_args *ap)
 	if (bp->b_cmd == BUF_CMD_READ)
 		error = VOP_READ(vn->sc_vp, &auio, IO_DIRECT, vn->sc_cred);
 	else
-		error = VOP_WRITE(vn->sc_vp, &auio, IO_NOBWILL, vn->sc_cred);
+		error = VOP_WRITE(vn->sc_vp, &auio, IO_DIRECT, vn->sc_cred);
 	vn_unlock(vn->sc_vp);
 	bp->b_resid = auio.uio_resid;
 	if (error) {
diff --git a/sys/kern/kern_slaballoc.c b/sys/kern/kern_slaballoc.c
index 819079cbc6..d5e22fd629 100644
--- a/sys/kern/kern_slaballoc.c
+++ b/sys/kern/kern_slaballoc.c
@@ -33,7 +33,7 @@
  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $DragonFly: src/sys/kern/kern_slaballoc.c,v 1.53 2008/06/30 03:00:36 dillon Exp $
+ * $DragonFly: src/sys/kern/kern_slaballoc.c,v 1.54 2008/07/01 02:02:54 dillon Exp $
 *
 * This module implements a slab allocator drop-in replacement for the
 * kernel malloc().
@@ -1120,8 +1120,8 @@ kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags)
 	 *
 	 * VM_ALLOC_SYSTEM is automatically set if we are preempting and
 	 * M_WAITOK was specified as an alternative (i.e. M_USE_RESERVE is
-	 * implied in this case), though I'm sure if we really need to do
-	 * that.
+	 * implied in this case), though I'm not sure if we really need to
+	 * do that.
 	 */
 	vmflags = base_vmflags;
 	if (flags & M_WAITOK) {
@@ -1150,7 +1150,7 @@ kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags)
 		vm_map_lock(&kernel_map);
 	    } else {
 		vm_map_unlock(&kernel_map);
-		vm_wait();
+		vm_wait(0);
 		vm_map_lock(&kernel_map);
 	    }
 	    i -= PAGE_SIZE;	/* retry */
diff --git a/sys/kern/uipc_syscalls.c b/sys/kern/uipc_syscalls.c
index 1390345e51..f48b43374d 100644
--- a/sys/kern/uipc_syscalls.c
+++ b/sys/kern/uipc_syscalls.c
@@ -35,7 +35,7 @@
 *
 *	@(#)uipc_syscalls.c	8.4 (Berkeley) 2/21/94
 * $FreeBSD: src/sys/kern/uipc_syscalls.c,v 1.65.2.17 2003/04/04 17:11:16 tegge Exp $
- * $DragonFly: src/sys/kern/uipc_syscalls.c,v 1.85 2008/04/14 12:01:50 dillon Exp $
+ * $DragonFly: src/sys/kern/uipc_syscalls.c,v 1.86 2008/07/01 02:02:54 dillon Exp $
 */

 #include "opt_ktrace.h"
@@ -1580,7 +1580,7 @@ retry_lookup:
 	if (pg == NULL) {
 		pg = vm_page_alloc(obj, pindex, VM_ALLOC_NORMAL);
 		if (pg == NULL) {
-			vm_wait();
+			vm_wait(0);
 			crit_exit();
 			goto retry_lookup;
 		}
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index 804aeec929..2a7dfbf54a 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -12,7 +12,7 @@
 *		John S. Dyson.
 *
 * $FreeBSD: src/sys/kern/vfs_bio.c,v 1.242.2.20 2003/05/28 18:38:10 alc Exp $
- * $DragonFly: src/sys/kern/vfs_bio.c,v 1.108 2008/06/30 02:11:53 dillon Exp $
+ * $DragonFly: src/sys/kern/vfs_bio.c,v 1.109 2008/07/01 02:02:54 dillon Exp $
 */

 /*
@@ -100,6 +100,7 @@ static void vfs_clean_pages(struct buf *bp);
 static void vfs_setdirty(struct buf *bp);
 static void vfs_vmio_release(struct buf *bp);
 static int flushbufqueues(bufq_type_t q);
+static vm_page_t bio_page_alloc(vm_object_t obj, vm_pindex_t pg, int deficit);
 static void bd_signal(int totalspace);
 static void buf_daemon(void);

@@ -126,6 +127,7 @@ int dirtybufspace, dirtybufspacehw, lodirtybufspace, hidirtybufspace;
 int runningbufspace, runningbufcount;
 static int getnewbufcalls;
 static int getnewbufrestarts;
+static int recoverbufcalls;
 static int needsbuffer;		/* locked by needsbuffer_spin */
 static int bd_request;		/* locked by needsbuffer_spin */
 static int bd_request_hw;	/* locked by needsbuffer_spin */
@@ -133,6 +135,10 @@ static u_int bd_wake_ary[BD_WAKE_SIZE];
 static u_int bd_wake_index;
 static struct spinlock needsbuffer_spin;

+static struct thread *bufdaemon_td;
+static struct thread *bufdaemonhw_td;
+
+
 /*
  * Sysctls for operational control of the buffer cache.
  */
@@ -173,6 +179,8 @@ SYSCTL_INT(_vfs, OID_AUTO, getnewbufcalls, CTLFLAG_RD, &getnewbufcalls, 0,
 	"New buffer header acquisition requests");
 SYSCTL_INT(_vfs, OID_AUTO, getnewbufrestarts, CTLFLAG_RD, &getnewbufrestarts, 0,
 	"New buffer header acquisition restarts");
+SYSCTL_INT(_vfs, OID_AUTO, recoverbufcalls, CTLFLAG_RD, &recoverbufcalls, 0,
+	"Recover VM space in an emergency");
 SYSCTL_INT(_vfs, OID_AUTO, bufdefragcnt, CTLFLAG_RD, &bufdefragcnt, 0,
 	"Buffer acquisition restarts due to fragmented buffer map");
 SYSCTL_INT(_vfs, OID_AUTO, buffreekvacnt, CTLFLAG_RD, &buffreekvacnt, 0,
@@ -382,6 +390,9 @@ bd_wait(int totalspace)
 	u_int i;
 	int count;

+	if (curthread == bufdaemonhw_td || curthread == bufdaemon_td)
+		return;
+
 	while (totalspace > 0) {
 		bd_heatup();
 		crit_enter();
@@ -1899,6 +1910,117 @@ restart:
 	return(bp);
 }

+/*
+ * This routine is called in an emergency to recover VM pages from the
+ * buffer cache by cashing in clean buffers.  The idea is to recover
+ * enough pages to be able to satisfy a stuck bio_page_alloc().
+ */
+static int
+recoverbufpages(void)
+{
+	struct buf *bp;
+	int bytes = 0;
+
+	++recoverbufcalls;
+
+	while (bytes < MAXBSIZE) {
+		bp = TAILQ_FIRST(&bufqueues[BQUEUE_CLEAN]);
+		if (bp == NULL)
+			break;
+
+		/*
+		 * BQUEUE_CLEAN - B_AGE special case.  If not set the bp
+		 * cycles through the queue twice before being selected.
+		 */
+		if ((bp->b_flags & B_AGE) == 0 && TAILQ_NEXT(bp, b_freelist)) {
+			bp->b_flags |= B_AGE;
+			TAILQ_REMOVE(&bufqueues[BQUEUE_CLEAN], bp, b_freelist);
+			TAILQ_INSERT_TAIL(&bufqueues[BQUEUE_CLEAN],
+					  bp, b_freelist);
+			continue;
+		}
+
+		/*
+		 * Sanity Checks
+		 */
+		KKASSERT(bp->b_qindex == BQUEUE_CLEAN);
+		KKASSERT((bp->b_flags & B_DELWRI) == 0);
+
+		/*
+		 * Start freeing the bp.  This is somewhat involved.
+		 *
+		 * Buffers on the clean list must be disassociated from
+		 * their current vnode
+		 */
+
+		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
+			kprintf("recoverbufpages: warning, locked buf %p, race corrected\n", bp);
+			tsleep(&bd_request, 0, "gnbxxx", hz / 100);
+			continue;
+		}
+		if (bp->b_qindex != BQUEUE_CLEAN) {
+			kprintf("recoverbufpages: warning, BUF_LOCK blocked unexpectedly on buf %p index %d, race corrected\n", bp, bp->b_qindex);
+			BUF_UNLOCK(bp);
+			continue;
+		}
+		bremfree(bp);
+
+		/*
+		 * Dependencies must be handled before we disassociate the
+		 * vnode.
+		 *
+		 * NOTE: HAMMER will set B_LOCKED if the buffer cannot
+		 * be immediately disassociated.  HAMMER then becomes
+		 * responsible for releasing the buffer.
+		 */
+		if (LIST_FIRST(&bp->b_dep) != NULL) {
+			buf_deallocate(bp);
+			if (bp->b_flags & B_LOCKED) {
+				bqrelse(bp);
+				continue;
+			}
+			KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
+		}
+
+		bytes += bp->b_bufsize;
+
+		if (bp->b_flags & B_VMIO) {
+			bp->b_flags &= ~B_ASYNC;
+			bp->b_flags |= B_DIRECT;	/* try to free pages */
+			vfs_vmio_release(bp);
+		}
+		if (bp->b_vp)
+			brelvp(bp);
+
+		KKASSERT(bp->b_vp == NULL);
+		KKASSERT((bp->b_flags & B_HASHED) == 0);
+
+		/*
+		 * critical section protection is not required when
+		 * scrapping a buffer's contents because it is already
+		 * wired.
+		 */
+		if (bp->b_bufsize)
+			allocbuf(bp, 0);
+
+		bp->b_flags = B_BNOCLIP;
+		bp->b_cmd = BUF_CMD_DONE;
+		bp->b_vp = NULL;
+		bp->b_error = 0;
+		bp->b_resid = 0;
+		bp->b_bcount = 0;
+		bp->b_xio.xio_npages = 0;
+		bp->b_dirtyoff = bp->b_dirtyend = 0;
+		reinitbufbio(bp);
+		KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
+		buf_dep_init(bp);
+		bp->b_flags |= B_INVAL;
+		/* bfreekva(bp); */
+		brelse(bp);
+	}
+	return(bytes);
+}
+
 /*
  * buf_daemon:
 *
@@ -1911,9 +2033,6 @@ restart:
 *	waiting at the mid-point.
 */

-static struct thread *bufdaemon_td;
-static struct thread *bufdaemonhw_td;
-
 static struct kproc_desc buf_kp = {
 	"bufdaemon",
 	buf_daemon,
@@ -1940,6 +2059,7 @@ buf_daemon(void)
 	 */
 	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, bufdaemon_td,
 			      SHUTDOWN_PRI_LAST);
+	curthread->td_flags |= TDF_SYSTHREAD;

 	/*
 	 * This process is allowed to take the buffer cache to the limit
@@ -1992,6 +2112,7 @@ buf_daemon_hw(void)
 	 */
 	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, bufdaemonhw_td,
 			      SHUTDOWN_PRI_LAST);
+	curthread->td_flags |= TDF_SYSTHREAD;

 	/*
 	 * This process is allowed to take the buffer cache to the limit
@@ -2751,12 +2872,8 @@ allocbuf(struct buf *bp, int size)
 				 * with paging I/O, no matter which
 				 * process we are.
 				 */
-				m = vm_page_alloc(obj, pi, VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM);
-				if (m == NULL) {
-					vm_wait();
-					vm_pageout_deficit += desiredpages -
-							      bp->b_xio.xio_npages;
-				} else {
+				m = bio_page_alloc(obj, pi, desiredpages - bp->b_xio.xio_npages);
+				if (m) {
 					vm_page_wire(m);
 					vm_page_wakeup(m);
 					bp->b_flags &= ~B_CACHE;
@@ -3506,33 +3623,91 @@ vm_hold_load_pages(struct buf *bp, vm_offset_t from, vm_offset_t to)

 	from = round_page(from);
 	index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
-	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
-
-tryagain:
-
+	pg = from;
+	while (pg < to) {
 		/*
 		 * Note: must allocate system pages since blocking here
 		 * could intefere with paging I/O, no matter which
 		 * process we are.
 		 */
-		p = vm_page_alloc(&kernel_object,
-				  (pg >> PAGE_SHIFT),
-				  VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM);
-		if (!p) {
-			vm_pageout_deficit += (to - from) >> PAGE_SHIFT;
-			vm_wait();
-			goto tryagain;
-		}
-		vm_page_wire(p);
-		p->valid = VM_PAGE_BITS_ALL;
-		vm_page_flag_clear(p, PG_ZERO);
-		pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
-		bp->b_xio.xio_pages[index] = p;
-		vm_page_wakeup(p);
+		p = bio_page_alloc(&kernel_object, pg >> PAGE_SHIFT,
+				   (vm_pindex_t)((to - pg) >> PAGE_SHIFT));
+		if (p) {
+			vm_page_wire(p);
+			p->valid = VM_PAGE_BITS_ALL;
+			vm_page_flag_clear(p, PG_ZERO);
+			pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
+			bp->b_xio.xio_pages[index] = p;
+			vm_page_wakeup(p);
+
+			pg += PAGE_SIZE;
+			++index;
+		}
 	}
 	bp->b_xio.xio_npages = index;
 }

+/*
+ * Allocate pages for a buffer cache buffer.
+ *
+ * Under extremely severe memory conditions even allocating out of the
+ * system reserve can fail.  If this occurs we must allocate out of the
+ * interrupt reserve to avoid a deadlock with the pageout daemon.
+ *
+ * The pageout daemon can run (putpages -> VOP_WRITE -> getblk -> allocbuf).
+ * If the buffer cache's vm_page_alloc() fails a vm_wait() can deadlock
+ * against the pageout daemon if pages are not freed from other sources.
+ */
+static
+vm_page_t
+bio_page_alloc(vm_object_t obj, vm_pindex_t pg, int deficit)
+{
+	vm_page_t p;
+
+	/*
+	 * Try a normal allocation, allow use of system reserve.
+	 */
+	p = vm_page_alloc(obj, pg, VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM);
+	if (p)
+		return(p);
+
+	/*
+	 * The normal allocation failed and we clearly have a page
+	 * deficit.  Try to reclaim some clean VM pages directly
+	 * from the buffer cache.
+	 */
+	vm_pageout_deficit += deficit;
+	recoverbufpages();
+
+	/*
+	 * We may have blocked, the caller will know what to do if the
+	 * page now exists.
+	 */
+	if (vm_page_lookup(obj, pg))
+		return(NULL);
+
+	/*
+	 * Allocate and allow use of the interrupt reserve.
+	 *
+	 * If after all that we still can't allocate a VM page we are
+	 * in real trouble, but we slog on anyway hoping that the system
+	 * won't deadlock.
+	 */
+	p = vm_page_alloc(obj, pg, VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM |
+			  VM_ALLOC_INTERRUPT);
+	if (p) {
+		kprintf("bio_page_alloc: WARNING emergency page "
+			"allocation\n");
+		if (vm_page_count_severe())
+			vm_wait(hz / 20);
+	} else {
+		kprintf("bio_page_alloc: WARNING emergency page "
+			"allocation failed\n");
+		vm_wait(hz * 5);
+	}
+	return(p);
+}
+
 /*
  * vm_hold_free_pages:
 *
diff --git a/sys/sys/thread.h b/sys/sys/thread.h
index 2fbe8fd8c6..4971519734 100644
--- a/sys/sys/thread.h
+++ b/sys/sys/thread.h
@@ -7,7 +7,7 @@
 * Types which must already be defined when this header is included by
 * userland: struct md_thread
 *
- * $DragonFly: src/sys/sys/thread.h,v 1.93 2008/05/26 17:11:09 nth Exp $
+ * $DragonFly: src/sys/sys/thread.h,v 1.94 2008/07/01 02:02:55 dillon Exp $
 */

 #ifndef _SYS_THREAD_H_
@@ -266,7 +266,7 @@ struct thread {
 #define TDF_SINTR		0x0040	/* interruptability hint for 'ps' */
 #define TDF_TSLEEPQ		0x0080	/* on a tsleep wait queue */

-#define TDF_SYSTHREAD		0x0100	/* system thread */
+#define TDF_SYSTHREAD		0x0100	/* allocations may use reserve */
 #define TDF_ALLOCATED_THREAD	0x0200	/* objcache allocated thread */
 #define TDF_ALLOCATED_STACK	0x0400	/* objcache allocated stack */
 #define TDF_VERBOSE		0x0800	/* verbose on exit */
diff --git a/sys/sys/vnode.h b/sys/sys/vnode.h
index 2de42418d4..15e357fb62 100644
--- a/sys/sys/vnode.h
+++ b/sys/sys/vnode.h
@@ -32,7 +32,7 @@
 *
 *	@(#)vnode.h	8.7 (Berkeley) 2/4/94
 * $FreeBSD: src/sys/sys/vnode.h,v 1.111.2.19 2002/12/29 18:19:53 dillon Exp $
- * $DragonFly: src/sys/sys/vnode.h,v 1.80 2008/05/18 21:47:05 dillon Exp $
+ * $DragonFly: src/sys/sys/vnode.h,v 1.81 2008/07/01 02:02:55 dillon Exp $
 */

 #ifndef _SYS_VNODE_H_
@@ -295,10 +295,6 @@ struct vnode {
 /*
  * Flags for ioflag. (high 16 bits used to ask for read-ahead and
  * help with write clustering)
- *
- * IO_NOBWILL - tells the system not to call bwillwrite() and potentially
- *		block when too many dirty buffers are present.  This is
- *		used by recursive subsystems such as VN which might deadlock.
 */
 #define IO_UNIT		0x0001		/* do I/O as atomic unit */
 #define IO_APPEND	0x0002		/* append write to end */
@@ -309,7 +305,7 @@ struct vnode {
 #define IO_INVAL	0x0040		/* invalidate after I/O */
 #define IO_ASYNC	0x0080		/* bawrite rather then bdwrite */
 #define IO_DIRECT	0x0100		/* attempt to bypass buffer cache */
-#define IO_NOBWILL	0x0200		/* do not block w/ bwillread/write */
+#define IO_UNUSED0200	0x0200
 #define IO_CORE		0x0400		/* I/O is part of core dump */
 #define IO_SEQMAX	0x7F		/* seq heuristic max value */
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index 8248a753a8..ebf5d6b642 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -96,7 +96,7 @@
 *	@(#)swap_pager.c	8.9 (Berkeley) 3/21/94
 *
 * $FreeBSD: src/sys/vm/swap_pager.c,v 1.130.2.12 2002/08/31 21:15:55 dillon Exp $
- * $DragonFly: src/sys/vm/swap_pager.c,v 1.31 2008/05/09 07:24:48 dillon Exp $
+ * $DragonFly: src/sys/vm/swap_pager.c,v 1.32 2008/07/01 02:02:56 dillon Exp $
 */

 #include <sys/param.h>
@@ -1903,7 +1903,7 @@ retry:
 		swap = *pswap = zalloc(swap_zone);
 		if (swap == NULL) {
-			vm_wait();
+			vm_wait(0);
 			goto retry;
 		}
 		swap->swb_hnext = NULL;
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index eb3c6a64ae..e075c2dce6 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -67,7 +67,7 @@
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_fault.c,v 1.108.2.8 2002/02/26 05:49:27 silby Exp $
- * $DragonFly: src/sys/vm/vm_fault.c,v 1.46 2008/05/09 07:24:48 dillon Exp $
+ * $DragonFly: src/sys/vm/vm_fault.c,v 1.47 2008/07/01 02:02:56 dillon Exp $
 */

 /*
@@ -1649,7 +1649,7 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
 			dst_m = vm_page_alloc(dst_object,
 			    OFF_TO_IDX(dst_offset), VM_ALLOC_NORMAL);
 			if (dst_m == NULL) {
-				vm_wait();
+				vm_wait(0);
 			}
 		} while (dst_m == NULL);
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index 93d2224d7c..14db5d1594 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -60,7 +60,7 @@
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_glue.c,v 1.94.2.4 2003/01/13 22:51:17 dillon Exp $
- * $DragonFly: src/sys/vm/vm_glue.c,v 1.55 2007/04/30 07:18:57 dillon Exp $
+ * $DragonFly: src/sys/vm/vm_glue.c,v 1.56 2008/07/01 02:02:56 dillon Exp $
 */

 #include "opt_vm.h"
@@ -239,7 +239,7 @@ vm_fork(struct proc *p1, struct proc *p2, int flags)
 	}

 	while (vm_page_count_severe()) {
-		vm_wait();
+		vm_wait(0);
 	}

 	if ((flags & RFMEM) == 0) {
@@ -345,7 +345,7 @@ loop:
 	 * Don't try to swap anything in if we are low on memory.
 	 */
 	if (vm_page_count_min()) {
-		vm_wait();
+		vm_wait(0);
 		goto loop;
 	}

diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 176bcb8e65..3b35e202db 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -35,7 +35,7 @@
 *
 * from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
 * $FreeBSD: src/sys/vm/vm_page.c,v 1.147.2.18 2002/03/10 05:03:19 alc Exp $
- * $DragonFly: src/sys/vm/vm_page.c,v 1.38 2008/05/09 07:24:48 dillon Exp $
+ * $DragonFly: src/sys/vm/vm_page.c,v 1.39 2008/07/01 02:02:56 dillon Exp $
 */

 /*
@@ -678,9 +678,10 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int page_req)
 		(VM_ALLOC_NORMAL|VM_ALLOC_INTERRUPT|VM_ALLOC_SYSTEM));

 	/*
-	 * The pager is allowed to eat deeper into the free page list.
+	 * Certain system threads (pageout daemon, buf_daemons) are
+	 * allowed to eat deeper into the free page list.
 	 */
-	if (curthread == pagethread)
+	if (curthread->td_flags & TDF_SYSTHREAD)
 		page_req |= VM_ALLOC_SYSTEM;

 	crit_enter();
@@ -804,18 +805,18 @@ loop:
 * places before memory allocations.
 */
 void
-vm_wait(void)
+vm_wait(int timo)
 {
 	crit_enter();
 	if (curthread == pagethread) {
 		vm_pageout_pages_needed = 1;
-		tsleep(&vm_pageout_pages_needed, 0, "VMWait", 0);
+		tsleep(&vm_pageout_pages_needed, 0, "VMWait", timo);
 	} else {
 		if (!vm_pages_needed) {
 			vm_pages_needed = 1;
 			wakeup(&vm_pages_needed);
 		}
-		tsleep(&vmstats.v_free_count, 0, "vmwait", 0);
+		tsleep(&vmstats.v_free_count, 0, "vmwait", timo);
 	}
 	crit_exit();
 }
@@ -1387,7 +1388,7 @@ retrylookup:
 	}
 	m = vm_page_alloc(object, pindex, allocflags & ~VM_ALLOC_RETRY);
 	if (m == NULL) {
-		vm_wait();
+		vm_wait(0);
 		if ((allocflags & VM_ALLOC_RETRY) == 0)
 			goto done;
 		goto retrylookup;
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 17ac52338f..e6d825597d 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -66,7 +66,7 @@
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_pageout.c,v 1.151.2.15 2002/12/29 18:21:04 dillon Exp $
- * $DragonFly: src/sys/vm/vm_pageout.c,v 1.35 2008/05/09 07:24:48 dillon Exp $
+ * $DragonFly: src/sys/vm/vm_pageout.c,v 1.36 2008/07/01 02:02:56 dillon Exp $
 */

 /*
@@ -1384,6 +1384,7 @@ vm_pageout(void)
 	/*
 	 * Initialize some paging parameters.
 	 */
+	curthread->td_flags |= TDF_SYSTHREAD;
 	vmstats.v_interrupt_free_min = 2;

 	if (vmstats.v_page_count < 2000)
diff --git a/sys/vm/vm_pageout.h b/sys/vm/vm_pageout.h
index 12ce8697da..7caf8906de 100644
--- a/sys/vm/vm_pageout.h
+++ b/sys/vm/vm_pageout.h
@@ -62,7 +62,7 @@
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_pageout.h,v 1.26.2.1 2002/02/26 05:49:28 silby Exp $
- * $DragonFly: src/sys/vm/vm_pageout.h,v 1.7 2006/05/20 02:42:15 dillon Exp $
+ * $DragonFly: src/sys/vm/vm_pageout.h,v 1.8 2008/07/01 02:02:56 dillon Exp $
 */

 #ifndef _VM_VM_PAGEOUT_H_
@@ -107,7 +107,7 @@ extern int vm_load_debug;
 */

 extern void pagedaemon_wakeup (void);
-extern void vm_wait (void);
+extern void vm_wait (int timo);
 extern void vm_waitpfault (void);

 #ifdef _KERNEL
-- 
2.41.0
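
The escalation ladder bio_page_alloc() climbs is easier to follow outside
the diff.  Below is a minimal, self-contained C sketch of the strategy the
log message describes.  Every name in it (mock_page_alloc, the pool
counters, recover_clean_buffers, alloc_page_for_buffer) is a hypothetical
stand-in invented for illustration, not a DragonFly kernel API; the
authoritative implementation is the vfs_bio.c hunk above.

#include <stdio.h>

#define ALLOC_NORMAL	0x01	/* ordinary free list */
#define ALLOC_SYSTEM	0x02	/* may dip into the system reserve */
#define ALLOC_INTERRUPT	0x04	/* may dip into the interrupt reserve */

/* Hypothetical page pools standing in for the VM free-page queues. */
static int free_pages;
static int system_reserve;
static int interrupt_reserve = 1;
static int clean_buffers = 3;	/* clean buffer cache bufs, one page each */

/* Stand-in for vm_page_alloc(): deeper reserves unlock with more flags. */
static int
mock_page_alloc(int flags)
{
	if ((flags & ALLOC_NORMAL) && free_pages > 0) {
		--free_pages;
		return (1);
	}
	if ((flags & ALLOC_SYSTEM) && system_reserve > 0) {
		--system_reserve;
		return (1);
	}
	if ((flags & ALLOC_INTERRUPT) && interrupt_reserve > 0) {
		--interrupt_reserve;
		return (1);
	}
	return (0);
}

/* Stand-in for recoverbufpages(): cash in clean buffers for their pages. */
static void
recover_clean_buffers(void)
{
	free_pages += clean_buffers;
	clean_buffers = 0;
}

/* The ladder bio_page_alloc() climbs before giving up for one cycle. */
static int
alloc_page_for_buffer(void)
{
	/* 1. Normal attempt, already allowed into the system reserve. */
	if (mock_page_alloc(ALLOC_NORMAL | ALLOC_SYSTEM))
		return (1);

	/* 2. Recycle pages held by clean buffer cache buffers. */
	recover_clean_buffers();

	/* 3. Retry, now also allowing the interrupt reserve. */
	if (mock_page_alloc(ALLOC_NORMAL | ALLOC_SYSTEM | ALLOC_INTERRUPT)) {
		fprintf(stderr, "warning: emergency page allocation\n");
		return (1);
	}

	/*
	 * 4. Still nothing.  The real code sleeps in vm_wait() with a
	 *    timeout and lets the caller retry rather than deadlocking
	 *    against the pageout daemon.
	 */
	fprintf(stderr, "warning: emergency page allocation failed\n");
	return (0);
}

int
main(void)
{
	while (!alloc_page_for_buffer())
		;	/* a real caller would block in vm_wait() here */
	printf("page allocated\n");
	return (0);
}

With the mock pools above, the first call finds both the free list and the
system reserve empty, drains the clean-buffer queue, and succeeds on the
retry.  That mirrors the intent of the patch: recoverbufpages() should
unstick a buffer allocation before the interrupt reserve, or the timed
vm_wait() retry loop, is ever needed.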