From: Matthew Dillon
Date: Sat, 3 Jul 2010 18:19:26 +0000 (-0700)
Subject: kernel - MPSAFE work - Finish tokenizing vm_page.c
X-Git-Tag: v2.9.0~806
X-Git-Url: https://gitweb.dragonflybsd.org/dragonfly.git/commitdiff_plain/573fb415e09e5db00b870784ff951af262c0e60d

kernel - MPSAFE work - Finish tokenizing vm_page.c

* Finish tokenizing vm_page.c

* Certain global procedures, particularly vm_page_hold() and
  vm_page_unhold(), are best called with the vm_token already held for
  implied non-blocking operation.
---
diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c
index b3d847c08e..8e4078204d 100644
--- a/sys/kern/kern_exec.c
+++ b/sys/kern/kern_exec.c
@@ -607,6 +607,7 @@ exec_map_page(struct image_params *imgp, vm_pindex_t pageno,
          */
         m = vm_page_grab(object, pageno, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
         crit_enter();
+        lwkt_gettoken(&vm_token);
         while ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) {
                 ma = m;
@@ -625,12 +626,14 @@ exec_map_page(struct image_params *imgp, vm_pindex_t pageno,
                                 vm_page_protect(m, VM_PROT_NONE);
                                 vnode_pager_freepage(m);
                         }
+                        lwkt_reltoken(&vm_token);
                         crit_exit();
                         return EIO;
                 }
         }
-        vm_page_hold(m);
+        vm_page_hold(m);        /* requires vm_token to be held */
         vm_page_wakeup(m);      /* unbusy the page */
+        lwkt_reltoken(&vm_token);
         crit_exit();
         *plwb = lwbuf_alloc(m);
diff --git a/sys/kern/kern_xio.c b/sys/kern/kern_xio.c
index 0961f3112d..f2bbe91dbc 100644
--- a/sys/kern/kern_xio.c
+++ b/sys/kern/kern_xio.c
@@ -177,13 +177,13 @@ xio_init_kbuf(xio_t xio, void *kbase, size_t kbytes)
     xio->xio_error = 0;
     if ((n = PAGE_SIZE - xio->xio_offset) > kbytes)
         n = kbytes;
+    lwkt_gettoken(&vm_token);
+    crit_enter();
     for (i = 0; n && i < XIO_INTERNAL_PAGES; ++i) {
         if ((paddr = pmap_kextract(addr)) == 0)
             break;
-        crit_enter();
         m = PHYS_TO_VM_PAGE(paddr);
         vm_page_hold(m);
-        crit_exit();
         xio->xio_pages[i] = m;
         kbytes -= n;
         xio->xio_bytes += n;
@@ -191,6 +191,8 @@ xio_init_kbuf(xio_t xio, void *kbase, size_t kbytes)
         n = PAGE_SIZE;
         addr += PAGE_SIZE;
     }
+    crit_exit();
+    lwkt_reltoken(&vm_token);
     xio->xio_npages = i;

     /*
@@ -221,12 +223,14 @@ xio_init_pages(xio_t xio, struct vm_page **mbase, int npages, int xflags)
     xio->xio_pages = xio->xio_internal_pages;
     xio->xio_npages = npages;
     xio->xio_error = 0;
+    lwkt_gettoken(&vm_token);
     crit_enter();
     for (i = 0; i < npages; ++i) {
         vm_page_hold(mbase[i]);
         xio->xio_pages[i] = mbase[i];
     }
     crit_exit();
+    lwkt_reltoken(&vm_token);
     return(0);
 }
@@ -240,6 +244,7 @@ xio_release(xio_t xio)
     int i;
     vm_page_t m;

+    lwkt_gettoken(&vm_token);
     crit_enter();
     for (i = 0; i < xio->xio_npages; ++i) {
         m = xio->xio_pages[i];
@@ -248,6 +253,7 @@ xio_release(xio_t xio)
         vm_page_unhold(m);
     }
     crit_exit();
+    lwkt_reltoken(&vm_token);
     xio->xio_offset = 0;
     xio->xio_npages = 0;
     xio->xio_bytes = 0;
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index fba43243ba..26d7a8b61f 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -1680,6 +1680,7 @@ vfs_vmio_release(struct buf *bp)
     int i;
     vm_page_t m;

+    lwkt_gettoken(&vm_token);
     crit_enter();
     for (i = 0; i < bp->b_xio.xio_npages; i++) {
         m = bp->b_xio.xio_pages[i];
@@ -1743,6 +1744,7 @@ vfs_vmio_release(struct buf *bp)
         }
     }
     crit_exit();
+    lwkt_reltoken(&vm_token);
     pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_xio.xio_npages);
     if (bp->b_bufsize) {
         bufspacewakeup();
@@ -4360,8 +4362,10 @@ vm_hold_free_pages(struct buf *bp, vm_offset_t from, vm_offset_t to)
     from = round_page(from);
     to = round_page(to);
-    newnpages = index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
+    index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
+    newnpages = index;
+    lwkt_gettoken(&vm_token);
     for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
         p = bp->b_xio.xio_pages[index];
         if (p && (index < bp->b_xio.xio_npages)) {
@@ -4379,6 +4383,7 @@ vm_hold_free_pages(struct buf *bp, vm_offset_t from, vm_offset_t to)
         }
     }
     bp->b_xio.xio_npages = newnpages;
+    lwkt_reltoken(&vm_token);
 }
diff --git a/sys/kern/vfs_vm.c b/sys/kern/vfs_vm.c
index 04b50b5b35..6975be4bb3 100644
--- a/sys/kern/vfs_vm.c
+++ b/sys/kern/vfs_vm.c
@@ -436,6 +436,7 @@ nvnode_pager_setsize(struct vnode *vp, off_t length, int blksize, int boff)
          * invalidated.
          */
         pi = OFF_TO_IDX(length + PAGE_MASK);
+        lwkt_gettoken(&vm_token);
         while (pi < nobjsize) {
             do {
                 m = vm_page_lookup(object, pi);
@@ -447,6 +448,7 @@ nvnode_pager_setsize(struct vnode *vp, off_t length, int blksize, int boff)
             }
             ++pi;
         }
+        lwkt_reltoken(&vm_token);
     } else {
         /*
          * File has expanded.
diff --git a/sys/platform/vkernel/platform/copyio.c b/sys/platform/vkernel/platform/copyio.c
index f1e15e5233..9bea25e67a 100644
--- a/sys/platform/vkernel/platform/copyio.c
+++ b/sys/platform/vkernel/platform/copyio.c
@@ -148,8 +148,8 @@ copyin(const void *udaddr, void *kaddr, size_t len)
         len -= n;
         udaddr = (const char *)udaddr + n;
         kaddr = (char *)kaddr + n;
-        vm_page_unhold(m);
         lwbuf_free(lwb);
+        vm_page_unhold(m);
     }
     rel_mplock();
     return (error);
@@ -187,8 +187,8 @@ copyout(const void *kaddr, void *udaddr, size_t len)
         udaddr = (char *)udaddr + n;
         kaddr = (const char *)kaddr + n;
         vm_page_dirty(m);
-        vm_page_unhold(m);
         lwbuf_free(lwb);
+        vm_page_unhold(m);
     }
     rel_mplock();
     return (error);
diff --git a/sys/platform/vkernel64/platform/copyio.c b/sys/platform/vkernel64/platform/copyio.c
index 89d1d0470c..b6b3dc6fc9 100644
--- a/sys/platform/vkernel64/platform/copyio.c
+++ b/sys/platform/vkernel64/platform/copyio.c
@@ -148,8 +148,8 @@ copyin(const void *udaddr, void *kaddr, size_t len)
         len -= n;
         udaddr = (const char *)udaddr + n;
         kaddr = (char *)kaddr + n;
-        vm_page_unhold(m);
         lwbuf_free(lwb);
+        vm_page_unhold(m);
     }
     rel_mplock();
     return (error);
@@ -187,8 +187,8 @@ copyout(const void *kaddr, void *udaddr, size_t len)
         udaddr = (char *)udaddr + n;
         kaddr = (const char *)kaddr + n;
         vm_page_dirty(m);
-        vm_page_unhold(m);
         lwbuf_free(lwb);
+        vm_page_unhold(m);
     }
     rel_mplock();
     return (error);
diff --git a/sys/vfs/procfs/procfs_mem.c b/sys/vfs/procfs/procfs_mem.c
index f62a6ead4b..d043c0873e 100644
--- a/sys/vfs/procfs/procfs_mem.c
+++ b/sys/vfs/procfs/procfs_mem.c
@@ -150,9 +150,7 @@ procfs_rwmem(struct proc *curp, struct proc *p, struct uio *uio)
         /*
          * release the page and we are done
          */
-        crit_enter();
         vm_page_unhold(m);
-        crit_exit();
     } while (error == 0 && uio->uio_resid > 0);
     kmem_free(&kernel_map, kva, PAGE_SIZE);
diff --git a/sys/vm/device_pager.c b/sys/vm/device_pager.c
index ba971e2e8c..55874807a6 100644
--- a/sys/vm/device_pager.c
+++ b/sys/vm/device_pager.c
@@ -210,10 +210,12 @@ dev_pager_getpage(vm_object_t object, vm_page_t *mpp, int seqaccess)
         page = dev_pager_getfake(paddr);
         TAILQ_INSERT_TAIL(&object->un_pager.devp.devp_pglist,
                           page, pageq);
+        lwkt_gettoken(&vm_token);
         crit_enter();
         vm_page_free(*mpp);
         vm_page_insert(page, object, offset);
         crit_exit();
+        lwkt_reltoken(&vm_token);
     }
     mtx_unlock(&dev_pager_mtx);
     return (VM_PAGER_OK);
diff --git a/sys/vm/phys_pager.c b/sys/vm/phys_pager.c
index 07f3727721..4656384747 100644
--- a/sys/vm/phys_pager.c
+++ b/sys/vm/phys_pager.c
@@ -81,6 +81,7 @@ phys_pager_getpage(vm_object_t object, vm_page_t *mpp, int seqaccess)
 {
     vm_page_t m = *mpp;

+    lwkt_gettoken(&vm_token);
     crit_enter();
     if ((m->flags & PG_ZERO) == 0)
         vm_page_zero_fill(m);
@@ -90,6 +91,7 @@ phys_pager_getpage(vm_object_t object, vm_page_t *mpp, int seqaccess)
     m->valid = VM_PAGE_BITS_ALL;
     m->dirty = 0;
     crit_exit();
+    lwkt_reltoken(&vm_token);
     return (VM_PAGER_OK);
 }
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index 1b228cb1b1..54d0dd800e 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -636,6 +636,7 @@ RetryFault:
      * (so we don't want to lose the fact that the page will be dirtied
      * if a write fault was specified).
      */
+    lwkt_gettoken(&vm_token);
     vm_page_hold(fs.m);
     vm_page_flag_clear(fs.m, PG_ZERO);
     if (fault_type & VM_PROT_WRITE)
@@ -653,7 +654,6 @@ RetryFault:
      * Unbusy the page by activating it.  It remains held and will not
      * be reclaimed.
      */
-    lwkt_gettoken(&vm_token);
     vm_page_activate(fs.m);
     if (curthread->td_lwp) {
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 953ef3e764..048fde07ee 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -350,26 +350,51 @@ rb_vm_page_compare(struct vm_page *p1, struct vm_page *p2)
     return(0);
 }

+/*
+ * Holding a page keeps it from being reused.  Other parts of the system
+ * can still disassociate the page from its current object and free it, or
+ * perform read or write I/O on it and/or otherwise manipulate the page,
+ * but if the page is held the VM system will leave the page and its data
+ * intact and not reuse the page for other purposes until the last hold
+ * reference is released.  (see vm_page_wire() if you want to prevent the
+ * page from being disassociated from its object too).
+ *
+ * The caller must hold vm_token.
+ *
+ * The caller must still validate the contents of the page and, if necessary,
+ * wait for any pending I/O (e.g. vm_page_sleep_busy() loop) to complete
+ * before manipulating the page.
+ */
+void
+vm_page_hold(vm_page_t m)
+{
+    ASSERT_LWKT_TOKEN_HELD(&vm_token);
+    ++m->hold_count;
+}
+
 /*
  * The opposite of vm_page_hold().  A page can be freed while being held,
  * which places it on the PQ_HOLD queue.  We must call vm_page_free_toq()
  * in this case to actually free it once the hold count drops to 0.
  *
- * This routine must be called at splvm().
+ * The caller must hold vm_token if non-blocking operation is desired,
+ * but otherwise does not need to.
  */
 void
-vm_page_unhold(vm_page_t mem)
+vm_page_unhold(vm_page_t m)
 {
-    --mem->hold_count;
-    KASSERT(mem->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!"));
-    if (mem->hold_count == 0 && mem->queue == PQ_HOLD) {
-        vm_page_busy(mem);
-        vm_page_free_toq(mem);
+    lwkt_gettoken(&vm_token);
+    --m->hold_count;
+    KASSERT(m->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!"));
+    if (m->hold_count == 0 && m->queue == PQ_HOLD) {
+        vm_page_busy(m);
+        vm_page_free_toq(m);
     }
+    lwkt_reltoken(&vm_token);
 }

 /*
- * Inserts the given mem entry into the object and object list.
+ * Inserts the given vm_page into the object and object list.
  *
  * The pagetables are not updated but will presumably fault the page
  * in if necessary, or if a kernel page the caller will at some point
@@ -377,12 +402,14 @@ vm_page_unhold(vm_page_t m)
  * here so we *can't* do this anyway.
  *
  * This routine may not block.
+ * This routine must be called with the vm_token held.
 * This routine must be called with a critical section held.
 */
 void
 vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
 {
     ASSERT_IN_CRIT_SECTION();
+    ASSERT_LWKT_TOKEN_HELD(&vm_token);
     if (m->object != NULL)
         panic("vm_page_insert: already inserted");
@@ -465,16 +492,7 @@ vm_page_remove(vm_page_t m)
  * Locate and return the page at (object, pindex), or NULL if the
  * page could not be found.
  *
- * This routine will operate properly without spl protection, but
- * the returned page could be in flux if it is busy.  Because an
- * interrupt can race a caller's busy check (unbusying and freeing the
- * page we return before the caller is able to check the busy bit),
- * the caller should generally call this routine with a critical
- * section held.
- *
- * Callers may call this routine without spl protection if they know
- * 'for sure' that the page will not be ripped out from under them
- * by an interrupt.
+ * The caller must hold vm_token if non-blocking operation is desired.
  */
 vm_page_t
 vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
@@ -537,7 +555,7 @@ vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
  * is being moved between queues or otherwise is to remain BUSYied by the
  * caller.
  *
- * This routine must be called at splhigh().
+ * The caller must hold vm_token
  * This routine may not block.
  */
 void
@@ -546,6 +564,7 @@ vm_page_unqueue_nowakeup(vm_page_t m)
     int queue = m->queue;
     struct vpgqueues *pq;

+    ASSERT_LWKT_TOKEN_HELD(&vm_token);
     if (queue != PQ_NONE) {
         pq = &vm_page_queues[queue];
         m->queue = PQ_NONE;
@@ -559,7 +578,7 @@ vm_page_unqueue_nowakeup(vm_page_t m)
  * vm_page_unqueue() - Remove a page from its queue, wakeup the pagedemon
  * if necessary.
  *
- * This routine must be called at splhigh().
+ * The caller must hold vm_token
  * This routine may not block.
  */
 void
@@ -568,6 +587,7 @@ vm_page_unqueue(vm_page_t m)
     int queue = m->queue;
     struct vpgqueues *pq;

+    ASSERT_LWKT_TOKEN_HELD(&vm_token);
     if (queue != PQ_NONE) {
         m->queue = PQ_NONE;
         pq = &vm_page_queues[queue];
@@ -589,7 +609,7 @@ vm_page_unqueue(vm_page_t m)
  * caches.  We need this optimization because cpu caches tend to be
  * physical caches, while object spaces tend to be virtual.
  *
- * This routine must be called at splvm().
+ * Must be called with vm_token held.
  * This routine may not block.
  *
  * Note that this routine is carefully inlined.  A non-inlined version
@@ -636,6 +656,10 @@ _vm_page_list_find2(int basequeue, int index)
     return(m);
 }

+/*
+ * Must be called with vm_token held if the caller desired non-blocking
+ * operation and a stable result.
+ */
 vm_page_t
 vm_page_list_find(int basequeue, int index, boolean_t prefer_zero)
 {
@@ -647,14 +671,15 @@ vm_page_list_find(int basequeue, int index, boolean_t prefer_zero)
  * might be found, but not applicable, they are deactivated.  This
  * keeps us from using potentially busy cached pages.
  *
- * This routine must be called with a critical section held.
  * This routine may not block.
+ * Must be called with vm_token held.
  */
 vm_page_t
 vm_page_select_cache(vm_object_t object, vm_pindex_t pindex)
 {
     vm_page_t m;

+    ASSERT_LWKT_TOKEN_HELD(&vm_token);
     while (TRUE) {
         m = _vm_page_list_find(
             PQ_CACHE,
@@ -1137,11 +1162,13 @@ vm_page_free_fromq_fast(void)
  * mappings.
  *
  * Must be called with a critical section held.
+ * Must be called with vm_token held.
  */
 void
 vm_page_unmanage(vm_page_t m)
 {
     ASSERT_IN_CRIT_SECTION();
+    ASSERT_LWKT_TOKEN_HELD(&vm_token);
     if ((m->flags & PG_UNMANAGED) == 0) {
         if (m->wire_count == 0)
             vm_page_unqueue(m);
@@ -1251,6 +1278,7 @@ vm_page_unwire(vm_page_t m, int activate)
  * except without unmapping it from the process address space.
  *
  * This routine may not block.
+ * The caller must hold vm_token.
  */
 static __inline void
 _vm_page_deactivate(vm_page_t m, int athead)
@@ -1280,20 +1308,26 @@ _vm_page_deactivate(vm_page_t m, int athead)
     }
 }

+/*
+ * Attempt to deactivate a page.
+ *
+ * No requirements.
+ */
 void
 vm_page_deactivate(vm_page_t m)
 {
-    crit_enter();
-    lwkt_gettoken(&vm_token);
-    _vm_page_deactivate(m, 0);
-    lwkt_reltoken(&vm_token);
-    crit_exit();
+    crit_enter();
+    lwkt_gettoken(&vm_token);
+    _vm_page_deactivate(m, 0);
+    lwkt_reltoken(&vm_token);
+    crit_exit();
 }

 /*
- * vm_page_try_to_cache:
- *
+ * Attempt to move a page to PQ_CACHE.
  * Returns 0 on failure, 1 on success
+ *
+ * No requirements.
  */
 int
 vm_page_try_to_cache(vm_page_t m)
@@ -1321,6 +1355,8 @@ vm_page_try_to_cache(vm_page_t m)
 /*
  * Attempt to free the page.  If we cannot free it, we do nothing.
  * 1 is returned on success, 0 on failure.
+ *
+ * No requirements.
  */
 int
 vm_page_try_to_free(vm_page_t m)
@@ -1352,12 +1388,14 @@ vm_page_try_to_free(vm_page_t m)
  *
  * Put the specified page onto the page cache queue (if appropriate).
  *
+ * The caller must hold vm_token.
  * This routine may not block.
  */
 void
 vm_page_cache(vm_page_t m)
 {
     ASSERT_IN_CRIT_SECTION();
+    ASSERT_LWKT_TOKEN_HELD(&vm_token);
     if ((m->flags & (PG_BUSY|PG_UNMANAGED)) || m->busy ||
         m->wire_count || m->hold_count) {
@@ -1427,6 +1465,8 @@ vm_page_cache(vm_page_t m)
  * system to balance the queues, potentially recovering other unrelated
  * space from active.  The idea is to not force this to happen too
  * often.
+ *
+ * No requirements.
  */
 void
 vm_page_dontneed(vm_page_t m)
@@ -1490,6 +1530,8 @@ vm_page_dontneed(vm_page_t m)
 * This routine may be called from mainline code without spl protection and
 * be guarenteed a busied page associated with the object at the specified
 * index.
+ *
+ * No requirements.
 */
 vm_page_t
 vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
@@ -1539,8 +1581,11 @@ done:
  * a page.  May not block.
  *
  * Inputs are required to range within a page.
+ *
+ * No requirements.
+ * Non blocking.
  */
-__inline int
+int
 vm_page_bits(int base, int size)
 {
     int first_bit;
@@ -1629,6 +1674,9 @@ _vm_page_zero_valid(vm_page_t m, int base, int size)
 * We set valid bits inclusive of any overlap, but we can only
 * clear dirty bits for DEV_BSIZE chunks that are fully within
 * the range.
+ *
+ * Page must be busied?
+ * No other requirements.
 */
 void
 vm_page_set_valid(vm_page_t m, int base, int size)
@@ -1644,6 +1692,9 @@ vm_page_set_valid(vm_page_t m, int base, int size)
 * NOTE: This function does not clear the pmap modified bit.
 *       Also note that e.g. NFS may use a byte-granular base
 *       and size.
+ *
+ * Page must be busied?
+ * No other requirements.
 */
 void
 vm_page_set_validclean(vm_page_t m, int base, int size)
@@ -1662,6 +1713,9 @@ vm_page_set_validclean(vm_page_t m, int base, int size)
 /*
  * Set valid & dirty.  Used by buwrite()
+ *
+ * Page must be busied?
+ * No other requirements.
  */
 void
 vm_page_set_validdirty(vm_page_t m, int base, int size)
@@ -1681,6 +1735,9 @@ vm_page_set_validdirty(vm_page_t m, int base, int size)
 * NOTE: This function does not clear the pmap modified bit.
 *       Also note that e.g. NFS may use a byte-granular base
 *       and size.
+ *
+ * Page must be busied?
+ * No other requirements.
 */
 void
 vm_page_clear_dirty(vm_page_t m, int base, int size)
@@ -1697,6 +1754,9 @@ vm_page_clear_dirty(vm_page_t m, int base, int size)
  *
  * Also make sure the related object and vnode reflect the fact that the
  * object may now contain a dirty page.
+ *
+ * Page must be busied?
+ * No other requirements.
  */
 void
 vm_page_dirty(vm_page_t m)
@@ -1717,7 +1777,9 @@ vm_page_dirty(vm_page_t m)
 * Invalidates DEV_BSIZE'd chunks within a page.  Both the
 * valid and dirty bits for the effected areas are cleared.
 *
- * May not block.
+ * Page must be busied?
+ * Does not block.
+ * No other requirements.
 */
 void
 vm_page_set_invalid(vm_page_t m, int base, int size)
@@ -1738,6 +1800,9 @@ vm_page_set_invalid(vm_page_t m, int base, int size)
 *
 * Pages are most often semi-valid when the end of a file is mapped
 * into memory and the file's size is not page aligned.
+ *
+ * Page must be busied?
+ * No other requirements.
 */
 void
 vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
@@ -1780,7 +1845,8 @@ vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
 * will return FALSE in the degenerate case where the page is entirely
 * invalid, and TRUE otherwise.
 *
- * May not block.
+ * Does not block.
+ * No other requirements.
 */
 int
 vm_page_is_valid(vm_page_t m, int base, int size)
@@ -1795,6 +1861,9 @@ vm_page_is_valid(vm_page_t m, int base, int size)
 /*
  * update dirty bits from pmap/mmu.  May not block.
+ *
+ * Caller must hold vm_token if non-blocking operation desired.
+ * No other requirements.
  */
 void
 vm_page_test_dirty(vm_page_t m)
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index 50d25e1bc9..ee7a3a5ae0 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -405,6 +405,7 @@ vm_page_flag_clear(vm_page_t m, unsigned int bits)
 static __inline void
 vm_page_busy(vm_page_t m)
 {
+    ASSERT_LWKT_TOKEN_HELD(&vm_token);
     KASSERT((m->flags & PG_BUSY) == 0,
             ("vm_page_busy: page already busy!!!"));
     vm_page_flag_set(m, PG_BUSY);
@@ -484,7 +485,8 @@ vm_page_io_finish(vm_page_t m)
 #define VM_ALLOC_QUICK          0x10    /* like NORMAL but do not use cache */
 #define VM_ALLOC_RETRY          0x80    /* indefinite block (vm_page_grab()) */

-void vm_page_unhold(vm_page_t mem);
+void vm_page_hold(vm_page_t);
+void vm_page_unhold(vm_page_t);
 void vm_page_activate (vm_page_t);
 vm_page_t vm_page_alloc (struct vm_object *, vm_pindex_t, int);
 vm_page_t vm_page_grab (struct vm_object *, vm_pindex_t, int);
@@ -522,27 +524,6 @@ void vm_contig_pg_free(int, u_long);
 void vm_page_event_internal(vm_page_t, vm_page_event_t);
 void vm_page_dirty(vm_page_t m);

-/*
- * Holding a page keeps it from being reused.  Other parts of the system
- * can still disassociate the page from its current object and free it, or
- * perform read or write I/O on it and/or otherwise manipulate the page,
- * but if the page is held the VM system will leave the page and its data
- * intact and not reuse the page for other purposes until the last hold
- * reference is released.  (see vm_page_wire() if you want to prevent the
- * page from being disassociated from its object too).
- *
- * This routine must be called while at splvm() or better.
- *
- * The caller must still validate the contents of the page and, if necessary,
- * wait for any pending I/O (e.g. vm_page_sleep_busy() loop) to complete
- * before manipulating the page.
- */
-static __inline void
-vm_page_hold(vm_page_t mem)
-{
-    mem->hold_count++;
-}
-
 /*
  * Reduce the protection of a page.  This routine never raises the
  * protection and therefore can be safely called if the page is already
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index 70fcbb91ef..ee05321214 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -346,6 +346,8 @@ vnode_pager_setsize(struct vnode *vp, vm_ooffset_t nsize)
     if (nsize == vp->v_filesize)
         return;

+    lwkt_gettoken(&vm_token);
+
     /*
      * Has changed size.  Adjust the VM object's size and v_filesize
      * before we start scanning pages to prevent new pages from being
@@ -437,6 +439,7 @@ vnode_pager_setsize(struct vnode *vp, vm_ooffset_t nsize)
     } else {
         vp->v_filesize = nsize;
     }
+    lwkt_reltoken(&vm_token);
 }

 /*
@@ -597,6 +600,7 @@ vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *mpp, int bytecount,
     /*
      * Severe hack to avoid deadlocks with the buffer cache
      */
+    lwkt_gettoken(&vm_token);
     for (i = 0; i < count; ++i) {
         vm_page_t mt = mpp[i];

@@ -605,6 +609,7 @@ vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *mpp, int bytecount,
         vm_page_busy(mt);
         vm_page_io_finish(mt);
     }
+    lwkt_reltoken(&vm_token);

     /*
      * Calculate the actual number of bytes read and clean up the
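
For orientation, the calling convention this commit establishes can be summarized
with a small sketch.  This is illustrative only and not part of the commit; the
header paths and the example function are assumptions, while lwkt_gettoken(),
lwkt_reltoken(), vm_token, vm_page_hold() and vm_page_unhold() are taken from the
patch itself.  After this change vm_page_hold() asserts that the caller already
holds vm_token (giving an implied non-blocking hold), whereas vm_page_unhold()
acquires and releases the token internally, so callers may invoke it directly.

/*
 * Illustrative sketch only -- not part of the commit above.
 * Header paths are approximate and the function name is hypothetical.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/thread.h>         /* lwkt_gettoken(), lwkt_reltoken() */
#include <vm/vm.h>
#include <vm/vm_page.h>         /* vm_page_hold(), vm_page_unhold() */

static void
example_hold_page(vm_page_t m)
{
        /*
         * vm_page_hold() now requires the caller to hold vm_token;
         * holding the token also makes the hold operation non-blocking.
         */
        lwkt_gettoken(&vm_token);
        vm_page_hold(m);
        lwkt_reltoken(&vm_token);

        /* ... the page cannot be reused while its hold count is non-zero ... */

        /*
         * vm_page_unhold() takes vm_token itself, so no token is needed
         * here unless the caller wants guaranteed non-blocking behavior.
         */
        vm_page_unhold(m);
}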