/* sf invalid now */
vm_page_busy_wait(m, FALSE, "sockpgf");
vm_page_unwire(m, 0);
- if (m->wire_count == 0 && m->object == NULL) {
+ if (m->object == NULL &&
+ m->wire_count == 0 &&
+ (m->flags & PG_NEED_COMMIT) == 0) {
vm_page_free(m);
} else {
vm_page_wakeup(m);
static void vfs_clean_pages(struct buf *bp);
static void vfs_clean_one_page(struct buf *bp, int pageno, vm_page_t m);
+#if 0
static void vfs_dirty_one_page(struct buf *bp, int pageno, vm_page_t m);
+#endif
static void vfs_vmio_release(struct buf *bp);
static int flushbufqueues(bufq_type_t q);
static vm_page_t bio_page_alloc(vm_object_t obj, vm_pindex_t pg, int deficit);
}
/*
- * Set valid & dirty.
+ * Mark as needing a commit.
*/
for (i = 0; i < bp->b_xio.xio_npages; i++) {
m = bp->b_xio.xio_pages[i];
- vfs_dirty_one_page(bp, i, m);
+ vm_page_need_commit(m);
}
bqrelse(bp);
}
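The recycle predicate this patch threads through the VM system can be stated once in isolation.  The helper below is purely illustrative and does not exist in the tree; vm_page_recyclable() is a hypothetical name for the test the pageout and cache paths now apply to an already-busied page:

/*
 * Illustrative sketch only -- not part of this patch.  A page may be
 * freed or cached only if it is clean in the VM sense AND the owning
 * filesystem no longer needs to commit it.
 */
static __inline int
vm_page_recyclable(vm_page_t m)
{
	return (m->dirty == 0 && (m->flags & PG_NEED_COMMIT) == 0);
}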
/*
* If the wire_count has dropped to 0 we may need to take
- * further action before unbusying the page
+ * further action before unbusying the page.
+ *
+ * WARNING: vm_page_try_*() also checks PG_NEED_COMMIT for us.
*/
if (m->wire_count == 0) {
vm_page_flag_clear(m, PG_ZERO);
kprintf(" VDEV, loffset: %lld, flags: 0x%08x, npages: %d\n",
(long long)bp->b_loffset,
bp->b_flags, bp->b_xio.xio_npages);
- kprintf(" valid: 0x%x, dirty: 0x%x, wired: %d\n",
- m->valid, m->dirty, m->wire_count);
+ kprintf(" valid: 0x%x, dirty: 0x%x, "
+ "wired: %d\n",
+ m->valid, m->dirty,
+ m->wire_count);
panic("biodone: page busy < 0");
}
vm_page_io_finish(m);
vm_page_set_validclean(m, soff & PAGE_MASK, eoff - soff);
}
+#if 0
/*
* Similar to vfs_clean_one_page() but sets the bits to valid and dirty.
* The page data is assumed to be valid (there is no zeroing here).
return;
vm_page_set_validdirty(m, soff & PAGE_MASK, eoff - soff);
}
+#endif
/*
* vfs_bio_clrbuf:
continue;
}
vm_page_test_dirty(m);
- if (m->dirty) {
+ if (m->dirty || (m->flags & PG_NEED_COMMIT)) {
vm_object_hold(object);
KKASSERT(m->object == object);
((phys & (alignment - 1)) == 0) &&
(((phys ^ (phys + size - 1)) & ~(boundary - 1)) == 0) &&
m->busy == 0 && m->wire_count == 0 &&
- m->hold_count == 0 && (m->flags & PG_BUSY) == 0
-
- ) {
+ m->hold_count == 0 &&
+ (m->flags & (PG_BUSY | PG_NEED_COMMIT)) == 0)
+ {
break;
}
}
(VM_PAGE_TO_PHYS(&m[-1]) + PAGE_SIZE)) ||
((pqtype != PQ_FREE) && (pqtype != PQ_CACHE)) ||
m->busy || m->wire_count ||
- m->hold_count || (m->flags & PG_BUSY)
- ) {
+ m->hold_count ||
+ (m->flags & (PG_BUSY | PG_NEED_COMMIT)))
+ {
start++;
goto again;
}
if (pqtype == PQ_CACHE &&
m->hold_count == 0 &&
m->wire_count == 0 &&
- (m->flags & PG_UNMANAGED) == 0) {
+ (m->flags & (PG_UNMANAGED | PG_NEED_COMMIT)) == 0) {
vm_page_protect(m, VM_PROT_NONE);
KKASSERT((m->flags & PG_MAPPED) == 0);
KKASSERT(m->dirty == 0);
break;
}
if ((mt->flags &
- (PG_FICTITIOUS | PG_UNMANAGED)) ||
+ (PG_FICTITIOUS | PG_UNMANAGED |
+ PG_NEED_COMMIT)) ||
mt->hold_count ||
mt->wire_count) {
vm_page_wakeup(mt);
"busied page %p on queue %d\n", p, p->queue);
vm_page_wakeup(p);
} else if (p->wire_count == 0) {
+ /*
+ * NOTE: PG_NEED_COMMIT is ignored.
+ */
vm_page_free(p);
mycpu->gd_cnt.v_pfree++;
} else {
* mess with. Things can break if we mess with pages in
* any of the below states.
*/
- if (
- /*m->hold_count ||*/
- m->wire_count ||
- (m->flags & PG_UNMANAGED) ||
+ if (m->wire_count ||
+ (m->flags & (PG_UNMANAGED | PG_NEED_COMMIT)) ||
m->valid != VM_PAGE_BITS_ALL
) {
vm_page_wakeup(m);
}
if (op & OBSC_COLLAPSE_NOWAIT) {
- if (p->valid == 0 /*|| p->hold_count*/ ||
- p->wire_count) {
+ if (p->valid == 0 ||
+ p->wire_count ||
+ (p->flags & PG_NEED_COMMIT)) {
vm_page_wakeup(p);
return(0);
}
* cache buffer, and the buffer might be marked B_CACHE.
* This is fine as part of a truncation but VFSs must be
* sure to fix the buffer up when re-extending the file.
+ *
+ * NOTE! PG_NEED_COMMIT is ignored.
*/
if (p->wire_count != 0) {
vm_page_protect(p, VM_PROT_NONE);
/*
* We successfully busied the page
*/
- if ((m->flags & PG_UNMANAGED) == 0 &&
+ if ((m->flags & (PG_UNMANAGED | PG_NEED_COMMIT)) == 0 &&
m->hold_count == 0 &&
- m->wire_count == 0) {
+ m->wire_count == 0 &&
+ (m->dirty & m->valid) == 0) {
vm_page_spin_unlock(m);
pagedaemon_wakeup();
return(m);
}
+
+ /*
+	 * The page cannot be recycled; deactivate it.
+ */
_vm_page_deactivate_locked(m, 0);
if (_vm_page_wakeup(m)) {
vm_page_spin_unlock(m);
* lock) nobody else should be able to mess with the
* page before us.
*/
- KKASSERT((m->flags & PG_UNMANAGED) == 0);
+ KKASSERT((m->flags & (PG_UNMANAGED |
+ PG_NEED_COMMIT)) == 0);
KKASSERT(m->hold_count == 0);
KKASSERT(m->wire_count == 0);
vm_page_spin_unlock(m);
/*
* Clear the UNMANAGED flag when freeing an unmanaged page.
+	 * Clear the PG_NEED_COMMIT flag when freeing a page which needs one.
*/
- if (m->flags & PG_UNMANAGED) {
+ if (m->flags & PG_UNMANAGED)
vm_page_flag_clear(m, PG_UNMANAGED);
- }
+ if (m->flags & PG_NEED_COMMIT)
+ vm_page_flag_clear(m, PG_NEED_COMMIT);
if (m->hold_count != 0) {
vm_page_flag_clear(m, PG_ZERO);
* The page is not PG_ZERO'd so return it.
*/
vm_page_spin_unlock(m);
- KKASSERT((m->flags & PG_UNMANAGED) == 0);
+ KKASSERT((m->flags & (PG_UNMANAGED |
+ PG_NEED_COMMIT)) == 0);
KKASSERT(m->hold_count == 0);
KKASSERT(m->wire_count == 0);
break;
return(0);
}
if (m->dirty || m->hold_count || m->wire_count ||
- (m->flags & PG_UNMANAGED)) {
+ (m->flags & (PG_UNMANAGED | PG_NEED_COMMIT))) {
if (_vm_page_wakeup(m)) {
vm_page_spin_unlock(m);
wakeup(m);
if (m->dirty || /* can't free if it is dirty */
m->hold_count || /* or held (XXX may be wrong) */
m->wire_count || /* or wired */
- (m->flags & PG_UNMANAGED) || /* or unmanaged */
+ (m->flags & (PG_UNMANAGED | /* or unmanaged */
+ PG_NEED_COMMIT)) || /* or needs a commit */
m->queue - m->pc == PQ_FREE || /* already on PQ_FREE */
m->queue - m->pc == PQ_HOLD) { /* already on PQ_HOLD */
if (_vm_page_wakeup(m)) {
void
vm_page_cache(vm_page_t m)
{
- if ((m->flags & PG_UNMANAGED) || m->busy ||
- m->wire_count || m->hold_count) {
+ if ((m->flags & (PG_UNMANAGED | PG_NEED_COMMIT)) ||
+ m->busy || m->wire_count || m->hold_count) {
kprintf("vm_page_cache: attempting to cache busy/held page\n");
vm_page_wakeup(m);
return;
* everything.
*/
vm_page_protect(m, VM_PROT_NONE);
- if ((m->flags & (PG_UNMANAGED|PG_MAPPED)) || m->busy ||
- m->wire_count || m->hold_count) {
+ if ((m->flags & (PG_UNMANAGED | PG_MAPPED)) ||
+ m->busy || m->wire_count || m->hold_count) {
vm_page_wakeup(m);
- } else if (m->dirty) {
+ } else if (m->dirty || (m->flags & PG_NEED_COMMIT)) {
vm_page_deactivate(m);
vm_page_wakeup(m);
} else {
vm_page_flag_clear(m, PG_SBUSY);
}
+/*
+ * Indicate that a clean VM page requires a filesystem commit and cannot
+ * be freed, cached, or otherwise reused until the commit completes.
+ * Used by tmpfs.
+ */
+void
+vm_page_need_commit(vm_page_t m)
+{
+ vm_page_flag_set(m, PG_NEED_COMMIT);
+}
+
+/*
+ * Clear the commit requirement, allowing the page to be freed or
+ * recycled normally again.
+ */
+void
+vm_page_clear_commit(vm_page_t m)
+{
+	vm_page_flag_clear(m, PG_NEED_COMMIT);
+}
+
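A minimal usage sketch of the new API, assuming a hypothetical filesystem commit path: fs_commit_pages() and fs_commit_page() are stand-ins, not real kernel functions.  buwrite() flags the pages via vm_page_need_commit() (see the hunk above); once the filesystem has durably written a page it clears the flag, making the page recyclable again:

static void
fs_commit_pages(struct buf *bp)
{
	vm_page_t m;
	int i;

	for (i = 0; i < bp->b_xio.xio_npages; i++) {
		m = bp->b_xio.xio_pages[i];
		vm_page_busy_wait(m, FALSE, "fscmit");
		fs_commit_page(m);		/* hypothetical write-back hook */
		vm_page_clear_commit(m);	/* page may now be recycled */
		vm_page_wakeup(m);
	}
}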
/*
* Grab a page, blocking if it is busy and allocating a page if necessary.
* A busy page is returned or NULL. The page may or may not be valid and
*
- * WARNING: Page must be busied? But vfs_dirty_one_page() will
- * call this function in buwrite() so for now vm_token must
- * be held.
+ * WARNING: Page must be busied?  The (now #if 0'd)
+ * vfs_dirty_one_page() called this function from buwrite(),
+ * so for now vm_token must be held.
*
* No other requirements.
*/
m->valid |= pagebits;
m->dirty |= pagebits;
if (m->object)
- vm_object_set_writeable_dirty(m->object);
+ vm_object_set_writeable_dirty(m->object);
}
/*
#define PG_NOTMETA 0x00008000 /* do not back with swap */
#define PG_ACTIONLIST 0x00010000 /* lookaside action list present */
#define PG_SBUSY 0x00020000 /* soft-busy also set */
+#define PG_NEED_COMMIT 0x00040000 /* clean page requires commit */
/*
* Misc constants.
void vm_page_io_finish(vm_page_t m);
void vm_page_io_start(vm_page_t m);
+void vm_page_need_commit(vm_page_t m);
+void vm_page_clear_commit(vm_page_t m);
void vm_page_wakeup(vm_page_t m);
void vm_page_hold(vm_page_t);
void vm_page_unhold(vm_page_t);
break;
}
vm_page_test_dirty(p);
- if ((p->dirty & p->valid) == 0 ||
+ if (((p->dirty & p->valid) == 0 &&
+ (p->flags & PG_NEED_COMMIT) == 0) ||
p->queue - p->pc != PQ_INACTIVE ||
p->wire_count != 0 || /* may be held by buf cache */
p->hold_count != 0) { /* may be undergoing I/O */
break;
}
vm_page_test_dirty(p);
- if ((p->dirty & p->valid) == 0 ||
+ if (((p->dirty & p->valid) == 0 &&
+ (p->flags & PG_NEED_COMMIT) == 0) ||
p->queue - p->pc != PQ_INACTIVE ||
p->wire_count != 0 || /* may be held by buf cache */
p->hold_count != 0) { /* may be undergoing I/O */
vm_page_dirty(m);
}
- if (m->valid == 0) {
+ if (m->valid == 0 && (m->flags & PG_NEED_COMMIT) == 0) {
/*
* Invalid pages can be easily freed
*/
vm_pageout_page_free(m);
mycpu->gd_cnt.v_dfree++;
++delta;
- } else if (m->dirty == 0) {
+ } else if (m->dirty == 0 && (m->flags & PG_NEED_COMMIT) == 0) {
/*
* Clean pages can be placed onto the cache queue.
* This effectively frees them.
++*recycle_countp;
vm_page_protect(m, VM_PROT_NONE);
if (m->dirty == 0 &&
+ (m->flags & PG_NEED_COMMIT) == 0 &&
avail_shortage - delta > 0) {
vm_page_cache(m);
} else {
* Page has been successfully busied and it and its queue
* is no longer spinlocked.
*/
- if ((m->flags & PG_UNMANAGED) ||
+ if ((m->flags & (PG_UNMANAGED | PG_NEED_COMMIT)) ||
m->hold_count ||
m->wire_count) {
vm_page_deactivate(m);
void
vnode_pager_freepage(vm_page_t m)
{
- if (m->busy || m->wire_count) {
+ if (m->busy || m->wire_count || (m->flags & PG_NEED_COMMIT)) {
vm_page_activate(m);
vm_page_wakeup(m);
} else {