kernel - simplify vm pager ops, add pre-faulting for zero-fill pages.
diff --git a/sys/vm/vm_pager.c b/sys/vm/vm_pager.c
index 6c0a015..65bc20e 100644
@@ -62,7 +62,7 @@
  * rights to redistribute these changes.
  *
  * $FreeBSD: src/sys/vm/vm_pager.c,v 1.54.2.2 2001/11/18 07:11:00 dillon Exp $
- * $DragonFly: src/sys/vm/vm_pager.c,v 1.23 2006/12/28 21:24:02 dillon Exp $
+ * $DragonFly: src/sys/vm/vm_pager.c,v 1.24 2007/11/06 03:50:01 dillon Exp $
  */
 
 /*
@@ -99,14 +99,14 @@ extern struct pagerops physpagerops;
 
 int cluster_pbuf_freecnt = -1; /* unlimited to begin with */
 
-static int dead_pager_getpages (vm_object_t, vm_page_t *, int, int);
+static int dead_pager_getpage (vm_object_t, vm_page_t *, int);
 static vm_object_t dead_pager_alloc (void *, off_t, vm_prot_t, off_t);
 static void dead_pager_putpages (vm_object_t, vm_page_t *, int, int, int *);
-static boolean_t dead_pager_haspage (vm_object_t, vm_pindex_t, int *, int *);
+static boolean_t dead_pager_haspage (vm_object_t, vm_pindex_t);
 static void dead_pager_dealloc (vm_object_t);
 
 static int
-dead_pager_getpages(vm_object_t obj, vm_page_t *ma, int count, int req)
+dead_pager_getpage(vm_object_t obj, vm_page_t *mpp, int seqaccess)
 {
        return VM_PAGER_FAIL;
 }
@@ -129,12 +129,8 @@ dead_pager_putpages(vm_object_t object, vm_page_t *m, int count, int flags,
 }
 
 static int
-dead_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *prev, int *next)
+dead_pager_haspage(vm_object_t object, vm_pindex_t pindex)
 {
-       if (prev)
-               *prev = 0;
-       if (next)
-               *next = 0;
        return FALSE;
 }
 
@@ -148,7 +144,7 @@ static struct pagerops deadpagerops = {
        NULL,
        dead_pager_alloc,
        dead_pager_dealloc,
-       dead_pager_getpages,
+       dead_pager_getpage,
        dead_pager_putpages,
        dead_pager_haspage,
        NULL
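
The simplified interface is visible in the dead pager itself: pgo_getpage now takes a single page pointer plus a sequential-access hint instead of a (page array, count, request index) triple, and pgo_haspage answers a plain yes/no with the prev/next clustering out-parameters gone. A hypothetical minimal pager written against the new convention might look like the sketch below (everything named example_* is illustrative, not part of this commit):

static int
example_pager_getpage(vm_object_t object, vm_page_t *mpp, int seqaccess)
{
	/*
	 * Fill the single busied page *mpp; seqaccess hints that the
	 * caller detected a sequential access pattern.
	 */
	return VM_PAGER_OK;		/* or VM_PAGER_FAIL on error */
}

static boolean_t
example_pager_haspage(vm_object_t object, vm_pindex_t pindex)
{
	/*
	 * Report backing-store presence only; read-ahead clustering is
	 * now the pager's own business rather than the caller's.
	 */
	return FALSE;
}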
@@ -181,6 +177,7 @@ struct vm_map pager_map;
 static int bswneeded;
 static vm_offset_t swapbkva;           /* swap buffers kva */
 static TAILQ_HEAD(swqueue, buf) bswlist;
+static struct spinlock bswspin = SPINLOCK_INITIALIZER(&bswspin);
 
 void
 vm_pager_init(void)
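
bswspin is statically initialized with SPINLOCK_INITIALIZER, so the lock is usable from the very first getpbuf() call without any runtime setup. The same pattern, reduced to a toy counter (foo_* names are hypothetical):

static struct spinlock foo_spin = SPINLOCK_INITIALIZER(&foo_spin);
static int foo_count;

static void
foo_bump(void)
{
	spin_lock_wr(&foo_spin);	/* exclusive (write) acquisition */
	++foo_count;
	spin_unlock_wr(&foo_spin);
}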
@@ -218,11 +215,11 @@ vm_pager_bufferinit(void)
         */
        bp = swbuf;
        for (i = 0; i < nswbuf; ++i, ++bp) {
-               bp->b_kvabase = (caddr_t)(i * MAXPHYS) + swapbkva;
+               bp->b_kvabase = (caddr_t)((intptr_t)i * MAXPHYS) + swapbkva;
                bp->b_kvasize = MAXPHYS;
                TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);
                BUF_LOCKINIT(bp);
-               LIST_INIT(&bp->b_dep);
+               buf_dep_init(bp);
        }
 
        /*
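
Two small fixes ride along in the loop above: buf_dep_init() replaces the open-coded LIST_INIT() of b_dep, and the kva computation widens i before the multiply. The widening matters because i and MAXPHYS are plain ints, so i * MAXPHYS is a 32-bit multiply that can wrap before the result is converted for pointer arithmetic on an LP64 platform; casting one operand to intptr_t forces a 64-bit multiply. In practice nswbuf is far too small to trigger the wrap, so the cast is defensive. A standalone userland illustration (hypothetical index value; the wrapping multiply is formally undefined behavior and is shown only to demonstrate the hazard):

#include <stdint.h>
#include <stdio.h>

#define MAXPHYS	(128 * 1024)	/* assumption: typical BSD value */

int
main(void)
{
	int i = 20000;		/* hypothetical large buffer index */

	/* 32-bit multiply: 20000 * 131072 exceeds INT_MAX and wraps. */
	long bad = (long)(i * MAXPHYS);

	/* Widened multiply: correct 64-bit byte offset. */
	long good = (long)((intptr_t)i * MAXPHYS);

	printf("bad=%ld good=%ld\n", bad, good);
	return 0;
}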
@@ -334,7 +331,7 @@ initpbuf(struct buf *bp)
 }
 
 /*
- * allocate a physical buffer
+ * Allocate a physical buffer
  *
  *     There are a limited number (nswbuf) of physical buffers.  We need
  *     to make sure that no single subsystem is able to hog all of them,
@@ -347,59 +344,62 @@ initpbuf(struct buf *bp)
  *
  *     NOTE: pfreecnt can be NULL, but this 'feature' will be removed
  *     relatively soon when the rest of the subsystems get smart about it. XXX
+ *
+ * MPSAFE
  */
 struct buf *
 getpbuf(int *pfreecnt)
 {
        struct buf *bp;
 
-       crit_enter();
+       spin_lock_wr(&bswspin);
 
        for (;;) {
                if (pfreecnt) {
-                       while (*pfreecnt == 0) {
-                               tsleep(pfreecnt, 0, "wswbuf0", 0);
-                       }
+                       while (*pfreecnt == 0)
+                               ssleep(pfreecnt, &bswspin, 0, "wswbuf0", 0);
                }
 
                /* get a bp from the swap buffer header pool */
                if ((bp = TAILQ_FIRST(&bswlist)) != NULL)
                        break;
-
                bswneeded = 1;
-               tsleep(&bswneeded, 0, "wswbuf1", 0);
+               ssleep(&bswneeded, &bswspin, 0, "wswbuf1", 0);
                /* loop in case someone else grabbed one */
        }
        TAILQ_REMOVE(&bswlist, bp, b_freelist);
        if (pfreecnt)
                --*pfreecnt;
-       crit_exit();
+
+       spin_unlock_wr(&bswspin);
 
        initpbuf(bp);
        return bp;
 }
 
 /*
- * allocate a physical buffer, if one is available.
+ * Allocate a physical buffer, if one is available.
  *
  *     Note that there is no NULL hack here - all subsystems using this
  *     call understand how to use pfreecnt.
+ *
+ * MPSAFE
  */
 struct buf *
 trypbuf(int *pfreecnt)
 {
        struct buf *bp;
 
-       crit_enter();
+       spin_lock_wr(&bswspin);
+
        if (*pfreecnt == 0 || (bp = TAILQ_FIRST(&bswlist)) == NULL) {
-               crit_exit();
+               spin_unlock_wr(&bswspin);
                return NULL;
        }
        TAILQ_REMOVE(&bswlist, bp, b_freelist);
-
        --*pfreecnt;
 
-       crit_exit();
+       spin_unlock_wr(&bswspin);
 
        initpbuf(bp);
 
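
The crit_enter()/tsleep() pairs become spin_lock_wr()/ssleep(): ssleep() atomically releases the spinlock while the thread sleeps and reacquires it before returning, so the *pfreecnt and bswlist tests are always made with bswspin held, and the enclosing loops re-check the condition after every wakeup in case another thread won the race for the freed buffer. The wait side of that pattern, reduced to a hypothetical resource pool (the res_* names are illustrative):

struct resource {
	TAILQ_ENTRY(resource) link;
};

static struct spinlock res_spin = SPINLOCK_INITIALIZER(&res_spin);
static TAILQ_HEAD(, resource) res_free = TAILQ_HEAD_INITIALIZER(res_free);
static int res_needed;		/* a waiter is (or may be) asleep */

static struct resource *
res_get(void)
{
	struct resource *res;

	spin_lock_wr(&res_spin);
	while ((res = TAILQ_FIRST(&res_free)) == NULL) {
		res_needed = 1;
		/* Drops res_spin while asleep, retakes it on wakeup. */
		ssleep(&res_needed, &res_spin, 0, "reswait", 0);
	}
	TAILQ_REMOVE(&res_free, res, link);
	spin_unlock_wr(&res_spin);
	return res;
}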
@@ -407,29 +407,39 @@ trypbuf(int *pfreecnt)
 }
 
 /*
- * release a physical buffer
+ * Release a physical buffer
  *
  *     NOTE: pfreecnt can be NULL, but this 'feature' will be removed
  *     relatively soon when the rest of the subsystems get smart about it. XXX
+ *
+ * MPSAFE
  */
 void
 relpbuf(struct buf *bp, int *pfreecnt)
 {
-       crit_enter();
+       int wake_bsw = 0;
+       int wake_freecnt = 0;
 
        KKASSERT(bp->b_flags & B_PAGING);
-       BUF_UNLOCK(bp);
 
-       TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);
+       spin_lock_wr(&bswspin);
 
+       BUF_UNLOCK(bp);
+       TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);
        if (bswneeded) {
                bswneeded = 0;
-               wakeup(&bswneeded);
+               wake_bsw = 1;
        }
        if (pfreecnt) {
                if (++*pfreecnt == 1)
-                       wakeup(pfreecnt);
+                       wake_freecnt = 1;
        }
-       crit_exit();
+
+       spin_unlock_wr(&bswspin);
+
+       if (wake_bsw)
+               wakeup(&bswneeded);
+       if (wake_freecnt)
+               wakeup(pfreecnt);
 }
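
The release side completes the conversion: the wakeup decisions are recorded in local flags while bswspin is held, but wakeup() itself runs only after spin_unlock_wr(). Waking a thread while still holding the spinlock would, at minimum, have it immediately contend for a lock the waker still owns. The matching release half of the res_* sketch above:

static void
res_put(struct resource *res)
{
	int do_wakeup = 0;

	spin_lock_wr(&res_spin);
	TAILQ_INSERT_HEAD(&res_free, res, link);
	if (res_needed) {
		res_needed = 0;
		do_wakeup = 1;		/* decide under the lock... */
	}
	spin_unlock_wr(&res_spin);

	if (do_wakeup)
		wakeup(&res_needed);	/* ...act after dropping it */
}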