kernel: Correctly handle fake pages
author     Matthew Dillon <dillon@apollo.backplane.com>
           Fri, 12 Jul 2013 06:35:54 +0000 (08:35 +0200)
committer  François Tigeot <ftigeot@wolfpond.org>
           Tue, 30 Jul 2013 18:46:08 +0000 (20:46 +0200)
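
Fake (fictitious) pages handed out by the device pager and drm live outside
vm_page_array, so PHYS_TO_VM_PAGE() on their ptes is invalid, and a single
device paddr may even be wrapped by several fake pages.  As the hunks below
show, this change: adds a PG_DEVICE software pte bit (PG_AVAIL3) so teardown
paths can detect such mappings and recover the vm_page_t from the pv entry
(pv->pv_m) instead; adds a pmap_page_init() hook so the device pager and drm
can initialize the machine-dependent pv_list of a freshly kmalloc()'d fake
page; lets PG_FICTITIOUS pages participate in normal pv tracking
(pmap_remove_all() no longer skips them); and excludes OBJT_DEVICE and
OBJT_MGTDEVICE objects from the x86_64 shared page table optimization.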
12 files changed:
sys/cpu/i386/include/pmap.h
sys/cpu/x86_64/include/pmap.h
sys/dev/drm2/drm_vm.c
sys/platform/pc32/i386/pmap.c
sys/platform/pc32/include/pmap.h
sys/platform/pc64/x86_64/pmap.c
sys/platform/vkernel/platform/pmap.c
sys/platform/vkernel64/platform/pmap.c
sys/vm/device_pager.c
sys/vm/pmap.h
sys/vm/vm_page.c
sys/vm/vm_page.h

diff --git a/sys/cpu/i386/include/pmap.h b/sys/cpu/i386/include/pmap.h
index 761752a..fb27cdc 100644
@@ -71,6 +71,7 @@
 /* Our various interpretations of the above */
 #define PG_W           PG_AVAIL1       /* "Wired" pseudoflag */
 #define        PG_MANAGED      PG_AVAIL2
+#define        PG_DEVICE       PG_AVAIL3
 #define        PG_FRAME        (~((vm_paddr_t)PAGE_MASK))
 #define        PG_PROT         (PG_RW|PG_U)    /* all protection bits . */
 #define PG_N           (PG_NC_PWT|PG_NC_PCD)   /* Non-cacheable */
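
For context, PG_AVAIL1-PG_AVAIL3 are the three software-available bits of an
x86 pte (bits 9-11), so the new PG_DEVICE flag travels in the hardware pte
itself rather than in the vm_page_t.  A minimal sketch of the layout this
relies on (standard x86 bit values, restated here rather than quoted from
these headers):

        /* x86 reserves pte bits 9-11 for the OS; the kernel overlays its
         * pseudo-flags on them, and PG_DEVICE takes the last free one. */
        #define PG_AVAIL1       0x200   /* bit 9:  PG_W ("wired") */
        #define PG_AVAIL2       0x400   /* bit 10: PG_MANAGED     */
        #define PG_AVAIL3       0x800   /* bit 11: PG_DEVICE (new) */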
diff --git a/sys/cpu/x86_64/include/pmap.h b/sys/cpu/x86_64/include/pmap.h
index 82072e5..ab2cc7f 100644
@@ -70,6 +70,7 @@
 /* Our various interpretations of the above */
 #define PG_W           PG_AVAIL1       /* "Wired" pseudoflag */
 #define        PG_MANAGED      PG_AVAIL2
+#define        PG_DEVICE       PG_AVAIL3
 #define        PG_FRAME        (0x000ffffffffff000ul)
 #define        PG_PS_FRAME     (0x000fffffffe00000ul)
 #define        PG_PROT         (PG_RW|PG_U)    /* all protection bits . */
diff --git a/sys/dev/drm2/drm_vm.c b/sys/dev/drm2/drm_vm.c
index cbbf16e..859dc78 100644
@@ -188,6 +188,8 @@ static void page_init(vm_page_t m, vm_paddr_t paddr, int pat_mode)
 {
        bzero(m, sizeof(*m));
 
+       pmap_page_init(m);
+
         //m->flags = PG_BUSY | PG_FICTITIOUS;
         m->flags = PG_FICTITIOUS;
         m->valid = VM_PAGE_BITS_ALL;
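
Note the ordering in the hunk above: pmap_page_init() must run after the
bzero(), because it seeds state inside the page.  The helper itself is added
to every platform pmap further down; per those hunks it is simply:

        void
        pmap_page_init(struct vm_page *m)
        {
                vm_page_init(m);                /* MI hook; a no-op for now */
                TAILQ_INIT(&m->md.pv_list);     /* empty md pv chain */
        }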
diff --git a/sys/platform/pc32/i386/pmap.c b/sys/platform/pc32/i386/pmap.c
index 05872fd..02530bd 100644
@@ -582,6 +582,15 @@ pmap_init2(void)
        zinitna(pvzone, &pvzone_obj, NULL, 0, entry_max, ZONE_INTERRUPT, 1);
 }
 
+/*
+ * Typically used to initialize a fictitious page by vm/device_pager.c
+ */
+void
+pmap_page_init(struct vm_page *m)
+{
+       vm_page_init(m);
+       TAILQ_INIT(&m->md.pv_list);
+}
 
 /***************************************************
  * Low level helper routines.....
@@ -595,9 +604,7 @@ test_m_maps_pv(vm_page_t m, pv_entry_t pv)
        pv_entry_t spv;
 
        crit_enter();
-#ifdef PMAP_DEBUG
        KKASSERT(pv->pv_m == m);
-#endif
        TAILQ_FOREACH(spv, &m->md.pv_list, pv_list) {
                if (pv == spv) {
                        crit_exit();
@@ -1654,10 +1661,8 @@ free_pv_entry(pv_entry_t pv)
 {
        struct mdglobaldata *gd;
 
-#ifdef PMAP_DEBUG
        KKASSERT(pv->pv_m != NULL);
        pv->pv_m = NULL;
-#endif
        gd = mdcpu;
        pv_entry_count--;
        if (gd->gd_freepv == NULL)
@@ -1749,20 +1754,23 @@ pmap_remove_entry(struct pmap *pmap, vm_page_t m,
 
        /*
         * Cannot block
+        *
+        * XXX very poor performance for PG_FICTITIOUS pages (m will be NULL).
         */
        ASSERT_LWKT_TOKEN_HELD(&vm_token);
-       if (m->md.pv_list_count < pmap->pm_stats.resident_count) {
+       if (m && m->md.pv_list_count < pmap->pm_stats.resident_count) {
                TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
                        if (pmap == pv->pv_pmap && va == pv->pv_va) 
                                break;
                }
        } else {
                TAILQ_FOREACH(pv, &pmap->pm_pvlist, pv_plist) {
-#ifdef PMAP_DEBUG
                        KKASSERT(pv->pv_pmap == pmap);
-#endif
-                       if (va == pv->pv_va)
+                       if (va == pv->pv_va) {
+                               if (m == NULL)  /* PG_FICTITIOUS case */
+                                       m = pv->pv_m;
                                break;
+                       }
                }
        }
        KKASSERT(pv);
@@ -1798,10 +1806,8 @@ static void
 pmap_insert_entry(pmap_t pmap, pv_entry_t pv, vm_offset_t va,
                  vm_page_t mpte, vm_page_t m)
 {
-#ifdef PMAP_DEBUG
        KKASSERT(pv->pv_m == NULL);
        pv->pv_m = m;
-#endif
        pv->pv_va = va;
        pv->pv_pmap = pmap;
        pv->pv_ptem = mpte;
@@ -1848,7 +1854,10 @@ pmap_remove_pte(struct pmap *pmap, unsigned *ptq, vm_offset_t va,
        KKASSERT(pmap->pm_stats.resident_count > 0);
        --pmap->pm_stats.resident_count;
        if (oldpte & PG_MANAGED) {
-               m = PHYS_TO_VM_PAGE(oldpte);
+               if (oldpte & PG_DEVICE)
+                       m = NULL;
+               else
+                       m = PHYS_TO_VM_PAGE(oldpte);
                if (oldpte & PG_M) {
 #if defined(PMAP_DIAGNOSTIC)
                        if (pmap_nw_modified((pt_entry_t) oldpte)) {
@@ -1857,10 +1866,10 @@ pmap_remove_pte(struct pmap *pmap, unsigned *ptq, vm_offset_t va,
                                        (void *)va, (long)oldpte);
                        }
 #endif
-                       if (pmap_track_modified(va))
+                       if (m && pmap_track_modified(va))
                                vm_page_dirty(m);
                }
-               if (oldpte & PG_A)
+               if (m && (oldpte & PG_A))
                        vm_page_flag_set(m, PG_REFERENCED);
                pmap_remove_entry(pmap, m, va, info);
        } else {
@@ -2020,7 +2029,7 @@ pmap_remove_all(vm_page_t m)
        pv_entry_t pv;
        pmap_t pmap;
 
-       if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
+       if (!pmap_initialized /* || (m->flags & PG_FICTITIOUS)*/)
                return;
        if (TAILQ_EMPTY(&m->md.pv_list))
                return;
@@ -2047,7 +2056,7 @@ pmap_remove_all(vm_page_t m)
                pmap_inval_deinterlock(&info, pmap);
                if (tpte & PG_A)
                        vm_page_flag_set(m, PG_REFERENCED);
-               KKASSERT(PHYS_TO_VM_PAGE(tpte) == m);
+               KKASSERT((tpte & PG_DEVICE) || PHYS_TO_VM_PAGE(tpte) == m);
 
                /*
                 * Update the vm_page_t clean and reference bits.
@@ -2063,9 +2072,7 @@ pmap_remove_all(vm_page_t m)
                        if (pmap_track_modified(pv->pv_va))
                                vm_page_dirty(m);
                }
-#ifdef PMAP_DEBUG
                KKASSERT(pv->pv_m == m);
-#endif
                KKASSERT(pv == TAILQ_FIRST(&m->md.pv_list));
                TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
                TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
@@ -2158,15 +2165,20 @@ again:
                        if (pbits & PG_MANAGED) {
                                m = NULL;
                                if (pbits & PG_A) {
-                                       m = PHYS_TO_VM_PAGE(pbits);
-                                       vm_page_flag_set(m, PG_REFERENCED);
+                                       if ((pbits & PG_DEVICE) == 0) {
+                                               m = PHYS_TO_VM_PAGE(pbits);
+                                               vm_page_flag_set(m,
+                                                                PG_REFERENCED);
+                                       }
                                        cbits &= ~PG_A;
                                }
                                if (pbits & PG_M) {
                                        if (pmap_track_modified(i386_ptob(sindex))) {
-                                               if (m == NULL)
-                                                       m = PHYS_TO_VM_PAGE(pbits);
-                                               vm_page_dirty(m);
+                                               if ((pbits & PG_DEVICE) == 0) {
+                                                       if (m == NULL)
+                                                               m = PHYS_TO_VM_PAGE(pbits);
+                                                       vm_page_dirty(m);
+                                               }
                                                cbits &= ~PG_M;
                                        }
                                }
@@ -2232,7 +2244,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
         * This can block, get it before we do anything important.
         */
        if (pmap_initialized &&
-           (m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0) {
+           (m->flags & (/*PG_FICTITIOUS|*/PG_UNMANAGED)) == 0) {
                pv = get_pv_entry();
        } else {
                pv = NULL;
@@ -2294,7 +2306,9 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
                 * so we go ahead and sense modify status.
                 */
                if (origpte & PG_MANAGED) {
-                       if ((origpte & PG_M) && pmap_track_modified(va)) {
+                       if ((origpte & PG_M) &&
+                           (origpte & PG_DEVICE) == 0 &&
+                           pmap_track_modified(va)) {
                                vm_page_t om;
                                om = PHYS_TO_VM_PAGE(opa);
                                vm_page_dirty(om);
@@ -2339,7 +2353,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
         * called at interrupt time.
         */
        if (pmap_initialized && 
-           (m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0) {
+           (m->flags & (/*PG_FICTITIOUS|*/PG_UNMANAGED)) == 0) {
                pmap_insert_entry(pmap, pv, va, mpte, m);
                pv = NULL;
                ptbase_assert(pmap);
@@ -2368,6 +2382,8 @@ validate:
                newpte |= PG_U;
        if (pmap == &kernel_pmap)
                newpte |= pgeflag;
+       if (m->flags & PG_FICTITIOUS)
+               newpte |= PG_DEVICE;
 
        /*
         * If the mapping or permission bits are different, we need
@@ -2436,6 +2452,7 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m)
        unsigned ptepindex;
        vm_offset_t ptepa;
        pmap_inval_info info;
+       vm_offset_t newpte;
        pv_entry_t pv;
 
        vm_object_hold(pmap->pm_pteobj);
@@ -2445,7 +2462,7 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m)
         * This can block, get it before we do anything important.
         */
        if (pmap_initialized &&
-           (m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0) {
+           (m->flags & (/*PG_FICTITIOUS|*/PG_UNMANAGED)) == 0) {
                pv = get_pv_entry();
        } else {
                pv = NULL;
@@ -2535,7 +2552,7 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m)
         * Enter on the PV list if part of our managed memory
         */
        if (pmap_initialized &&
-           (m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0) {
+           (m->flags & (/*PG_FICTITIOUS|*/PG_UNMANAGED)) == 0) {
                pmap_insert_entry(pmap, pv, va, mpte, m);
                pv = NULL;
                vm_page_flag_set(m, PG_MAPPED);
@@ -2551,10 +2568,12 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m)
        /*
         * Now validate mapping with RO protection
         */
-       if (m->flags & (PG_FICTITIOUS|PG_UNMANAGED))
-               *pte = pa | PG_V | PG_U;
-       else
-               *pte = pa | PG_V | PG_U | PG_MANAGED;
+       newpte = pa | PG_V | PG_U;
+       if (m->flags & PG_FICTITIOUS)
+               newpte |= PG_DEVICE;
+       if ((m->flags & PG_UNMANAGED) == 0)
+               newpte |= PG_MANAGED;
+       *pte = newpte;
 /*     pmap_inval_add(&info, pmap, va); shouldn't be needed inval->valid */
        pmap_inval_done(&info);
        if (pv) {
@@ -2995,7 +3014,10 @@ pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
                tpte = loadandclear(pte);
                pmap_inval_deinterlock(&info, pmap);
 
-               m = PHYS_TO_VM_PAGE(tpte);
+               if (tpte & PG_DEVICE)
+                       m = pv->pv_m;
+               else
+                       m = PHYS_TO_VM_PAGE(tpte);
                test_m_maps_pv(m, pv);
 
                KASSERT(m < &vm_page_array[vm_page_array_size],
@@ -3012,10 +3034,8 @@ pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
                }
 
                npv = TAILQ_NEXT(pv, pv_plist);
-#ifdef PMAP_DEBUG
                KKASSERT(pv->pv_m == m);
                KKASSERT(pv->pv_pmap == pmap);
-#endif
                TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
                save_generation = ++pmap->pm_generation;
 
@@ -3456,14 +3476,17 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr)
 
                pa = pte & PG_FRAME;
 
-               m = PHYS_TO_VM_PAGE(pa);
+               if (pte & PG_DEVICE)
+                       m = NULL;
+               else
+                       m = PHYS_TO_VM_PAGE(pa);
 
                if (pte & PG_M) {
                        /*
                         * Modified by us
                         */
                        val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER;
-               } else if (m->dirty || pmap_is_modified(m)) {
+               } else if (m && (m->dirty || pmap_is_modified(m))) {
                        /*
                         * Modified by someone else
                         */
@@ -3475,8 +3498,8 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr)
                         * Referenced by us
                         */
                        val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;
-               } else if ((m->flags & PG_REFERENCED) ||
-                          pmap_ts_referenced(m)) {
+               } else if (m && ((m->flags & PG_REFERENCED) ||
+                                pmap_ts_referenced(m))) {
                        /*
                         * Referenced by someone else
                         */
@@ -3618,7 +3641,10 @@ pmap_get_pgeflag(void)
 vm_page_t
 pmap_kvtom(vm_offset_t va)
 {
-       return(PHYS_TO_VM_PAGE(*vtopte(va) & PG_FRAME));
+       unsigned *ptep = vtopte(va);
+
+       KKASSERT((*ptep & PG_DEVICE) == 0);
+       return(PHYS_TO_VM_PAGE(*ptep & PG_FRAME));
 }
 
 void
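
The common thread in the pc32 hunks above: a pte carrying PG_DEVICE must
never be fed to PHYS_TO_VM_PAGE(), because fake pages have no vm_page_array
slot; where the vm_page_t is still needed, it is recovered from the pv entry.
A condensed sketch of the rule (the helper name is illustrative, not part of
the commit):

        /* Sketch: how the pc32 paths now resolve a pte to its vm_page_t. */
        static __inline vm_page_t
        pte_to_page(unsigned pte, pv_entry_t pv)        /* hypothetical */
        {
                if (pte & PG_DEVICE)
                        return (pv ? pv->pv_m : NULL);  /* use the pv entry */
                return (PHYS_TO_VM_PAGE(pte & PG_FRAME));
        }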
diff --git a/sys/platform/pc32/include/pmap.h b/sys/platform/pc32/include/pmap.h
index 0fdc6e9..21e2a84 100644
@@ -283,11 +283,7 @@ typedef struct pv_entry {
        TAILQ_ENTRY(pv_entry)   pv_list;
        TAILQ_ENTRY(pv_entry)   pv_plist;
        struct vm_page  *pv_ptem;       /* VM page for pte */
-#ifdef PMAP_DEBUG
        struct vm_page  *pv_m;
-#else
-       void            *pv_dummy;      /* align structure to 32 bytes */
-#endif
 } *pv_entry_t;
 
 #ifdef _KERNEL
diff --git a/sys/platform/pc64/x86_64/pmap.c b/sys/platform/pc64/x86_64/pmap.c
index cf5111f..7c9126f 100644
@@ -1030,6 +1030,15 @@ pmap_init2(void)
        zinitna(pvzone, &pvzone_obj, NULL, 0, entry_max, ZONE_INTERRUPT, 1);
 }
 
+/*
+ * Typically used to initialize a fictitious page by vm/device_pager.c
+ */
+void
+pmap_page_init(struct vm_page *m)
+{
+       vm_page_init(m);
+       TAILQ_INIT(&m->md.pv_list);
+}
 
 /***************************************************
  * Low level helper routines.....
@@ -1812,6 +1821,9 @@ retry:
         * We currently allow any type of object to use this optimization.
         * The object itself does NOT have to be sized to a multiple of the
         * segment size, but the memory mapping does.
+        *
+        * XXX don't handle devices currently, because VM_PAGE_TO_PHYS()
+        *     won't work as expected.
         */
        if (entry == NULL ||
            pmap_mmu_optimize == 0 ||                   /* not enabled */
@@ -1819,6 +1831,8 @@ retry:
            entry->inheritance != VM_INHERIT_SHARE ||   /* not shared */
            entry->maptype != VM_MAPTYPE_NORMAL ||      /* weird map type */
            entry->object.vm_object == NULL ||          /* needs VM object */
+           entry->object.vm_object->type == OBJT_DEVICE ||     /* ick */
+           entry->object.vm_object->type == OBJT_MGTDEVICE ||  /* ick */
            (entry->offset & SEG_MASK) ||               /* must be aligned */
            (entry->start & SEG_MASK)) {
                return(pmap_allocpte(pmap, ptepindex, pvpp));
@@ -2255,6 +2269,10 @@ pmap_remove_pv_pte(pv_entry_t pv, pv_entry_t pvp, struct pmap_inval_info *info)
                 *
                 * NOTE: pv's must be locked bottom-up to avoid deadlocking.
                 *       pv is a pte_pv so we can safely lock pt_pv.
+                *
+                * NOTE: FICTITIOUS pages may have multiple physical mappings
+                *       so PHYS_TO_VM_PAGE() will not necessarily work for
+                *       terminal ptes.
                 */
                vm_pindex_t pt_pindex;
                pt_entry_t *ptep;
@@ -2295,8 +2313,13 @@ pmap_remove_pv_pte(pv_entry_t pv, pv_entry_t pvp, struct pmap_inval_info *info)
                                pte, pv->pv_pindex,
                                pv->pv_pindex < pmap_pt_pindex(0));
                }
+               /* PHYS_TO_VM_PAGE() will not work for FICTITIOUS pages */
                /*KKASSERT((pte & (PG_MANAGED|PG_V)) == (PG_MANAGED|PG_V));*/
-               p = PHYS_TO_VM_PAGE(pte & PG_FRAME);
+               if (pte & PG_DEVICE)
+                       p = pv->pv_m;
+               else
+                       p = PHYS_TO_VM_PAGE(pte & PG_FRAME);
+               /* p = pv->pv_m; */
 
                if (pte & PG_M) {
                        if (pmap_track_modified(ptepindex))
@@ -3402,6 +3425,7 @@ pmap_remove_callback(pmap_t pmap, struct pmap_scan_info *info,
                if (info->doinval)
                        pmap_inval_deinterlock(&info->inval, pmap);
                atomic_add_long(&pmap->pm_stats.resident_count, -1);
+               KKASSERT((pte & PG_DEVICE) == 0);
                if (vm_page_unwire_quick(PHYS_TO_VM_PAGE(pte & PG_FRAME)))
                        panic("pmap_remove: shared pgtable1 bad wirecount");
                if (vm_page_unwire_quick(pt_pv->pv_m))
@@ -3422,7 +3446,7 @@ pmap_remove_all(vm_page_t m)
        struct pmap_inval_info info;
        pv_entry_t pv;
 
-       if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
+       if (!pmap_initialized /* || (m->flags & PG_FICTITIOUS)*/)
                return;
 
        pmap_inval_init(&info);
@@ -3507,16 +3531,22 @@ again:
        if (pte_pv) {
                m = NULL;
                if (pbits & PG_A) {
-                       m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
-                       KKASSERT(m == pte_pv->pv_m);
-                       vm_page_flag_set(m, PG_REFERENCED);
+                       if ((pbits & PG_DEVICE) == 0) {
+                               m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
+                               KKASSERT(m == pte_pv->pv_m);
+                               vm_page_flag_set(m, PG_REFERENCED);
+                       }
                        cbits &= ~PG_A;
                }
                if (pbits & PG_M) {
                        if (pmap_track_modified(pte_pv->pv_pindex)) {
-                               if (m == NULL)
-                                       m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
-                               vm_page_dirty(m);
+                               if ((pbits & PG_DEVICE) == 0) {
+                                       if (m == NULL) {
+                                               m = PHYS_TO_VM_PAGE(pbits &
+                                                                   PG_FRAME);
+                                       }
+                                       vm_page_dirty(m);
+                               }
                                cbits &= ~PG_M;
                        }
                }
@@ -3528,6 +3558,10 @@ again:
                 * When asked to protect something in a shared page table
                 * page we just unmap the page table page.  We have to
                 * invalidate the tlb in this situation.
+                *
+                * XXX Warning, shared page tables will not be used for
+                * OBJT_DEVICE or OBJT_MGTDEVICE (PG_FICTITIOUS) mappings
+                * so PHYS_TO_VM_PAGE() should be safe here.
                 */
                pte = pte_load_clear(ptep);
                pmap_inval_invltlb(&info->inval);
@@ -3621,7 +3655,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
                pte_pv = NULL;
                pt_pv = NULL;
                ptep = vtopte(va);
-       } else if (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) { /* XXX */
+       } else if (m->flags & (/*PG_FICTITIOUS |*/ PG_UNMANAGED)) { /* XXX */
                pte_pv = NULL;
                if (va >= VM_MAX_USER_ADDRESS) {
                        pt_pv = NULL;
@@ -3665,6 +3699,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
        if (pmap == &kernel_pmap)
                newpte |= pgeflag;
        newpte |= pat_pte_index[m->pat_mode];
+       if (m->flags & PG_FICTITIOUS)
+               newpte |= PG_DEVICE;
 
        /*
         * It is possible for multiple faults to occur in threaded
@@ -4607,7 +4643,10 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr)
 
                pa = pte & PG_FRAME;
 
-               m = PHYS_TO_VM_PAGE(pa);
+               if (pte & PG_DEVICE)
+                       m = NULL;
+               else
+                       m = PHYS_TO_VM_PAGE(pa);
 
                /*
                 * Modified by us
@@ -4617,7 +4656,7 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr)
                /*
                 * Modified by someone
                 */
-               else if (m->dirty || pmap_is_modified(m))
+               else if (m && (m->dirty || pmap_is_modified(m)))
                        val |= MINCORE_MODIFIED_OTHER;
                /*
                 * Referenced by us
@@ -4628,7 +4667,8 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr)
                /*
                 * Referenced by someone
                 */
-               else if ((m->flags & PG_REFERENCED) || pmap_ts_referenced(m)) {
+               else if (m && ((m->flags & PG_REFERENCED) ||
+                               pmap_ts_referenced(m))) {
                        val |= MINCORE_REFERENCED_OTHER;
                        vm_page_flag_set(m, PG_REFERENCED);
                }
@@ -4752,7 +4792,10 @@ pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
 vm_page_t
 pmap_kvtom(vm_offset_t va)
 {
-       return(PHYS_TO_VM_PAGE(*vtopte(va) & PG_FRAME));
+       pt_entry_t *ptep = vtopte(va);
+
+       KKASSERT((*ptep & PG_DEVICE) == 0);
+       return(PHYS_TO_VM_PAGE(*ptep & PG_FRAME));
 }
 
 /*
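
Why pv->pv_m rather than a reverse physical lookup: each dev_pager_getfake()
call (sys/vm/device_pager.c below) kmalloc()s an independent vm_page_t around
a physical address, so two fake pages can alias the same paddr and
PHYS_TO_VM_PAGE() has no unique answer (it would index vm_page_array in any
case).  A small illustration, assuming the getfake path shown below:

        vm_page_t a = dev_pager_getfake(paddr, pat_mode);       /* mapping #1 */
        vm_page_t b = dev_pager_getfake(paddr, pat_mode);       /* mapping #2 */
        /* a != b, yet both wrap the same paddr; only the pv entry recorded
         * at pmap_enter() time identifies the right page on teardown. */

The same aliasing is why the shared page table optimization above now refuses
OBJT_DEVICE/OBJT_MGTDEVICE objects.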
diff --git a/sys/platform/vkernel/platform/pmap.c b/sys/platform/vkernel/platform/pmap.c
index f9fc7d6..1b2ebb7 100644
@@ -140,6 +140,16 @@ pmap_init2(void)
 }
 
 /*
+ * Typically used to initialize a fictitious page by vm/device_pager.c
+ */
+void
+pmap_page_init(struct vm_page *m)
+{
+       vm_page_init(m);
+       TAILQ_INIT(&m->md.pv_list);
+}
+
+/*
  * Bootstrap the kernel_pmap so it can be used with pmap_enter().  
  *
  * NOTE! pm_pdir for the kernel pmap is offset so VA's translate
diff --git a/sys/platform/vkernel64/platform/pmap.c b/sys/platform/vkernel64/platform/pmap.c
index 4003fcc..eb19bd8 100644
@@ -435,6 +435,16 @@ create_pagetables(vm_paddr_t *firstaddr, int64_t ptov_offset)
 }
 
 /*
+ * Typically used to initialize a fictitious page by vm/device_pager.c
+ */
+void
+pmap_page_init(struct vm_page *m)
+{
+       vm_page_init(m);
+       TAILQ_INIT(&m->md.pv_list);
+}
+
+/*
  *     Bootstrap the system enough to run with virtual memory.
  *
  *     On the i386 this is called after mapping has already been enabled
diff --git a/sys/vm/device_pager.c b/sys/vm/device_pager.c
index 6f93919..4e44259 100644
@@ -175,13 +175,11 @@ void
 cdev_pager_free_page(vm_object_t object, vm_page_t m)
 {
        if (object->type == OBJT_MGTDEVICE) {
-               kprintf("x");
                KKASSERT((m->flags & PG_FICTITIOUS) != 0);
                pmap_page_protect(m, VM_PROT_NONE);
                vm_page_remove(m);
                vm_page_wakeup(m);
        } else if (object->type == OBJT_DEVICE) {
-               kprintf("y");
                TAILQ_REMOVE(&object->un_pager.devp.devp_pglist, m, pageq);
                dev_pager_putfake(m);
        }
@@ -264,6 +262,8 @@ dev_pager_getfake(vm_paddr_t paddr, int pat_mode)
 
        m = kmalloc(sizeof(*m), M_FICTITIOUS_PAGES, M_WAITOK|M_ZERO);
 
+       pmap_page_init(m);
+
        m->flags = PG_BUSY | PG_FICTITIOUS;
        m->valid = VM_PAGE_BITS_ALL;
        m->dirty = 0;
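
Together with pmap_page_init() this gives fake pages a full life cycle.  A
hedged sketch of the round trip for an OBJT_MGTDEVICE page (the flow is
inferred from this diff; the fault-handler context is illustrative):

        vm_page_t m;

        m = dev_pager_getfake(paddr, pat_mode); /* kmalloc + pmap_page_init */
        /* ... the fault path maps it; pmap_enter() sets PG_DEVICE in the
         * pte because the page is PG_FICTITIOUS ... */

        /* On release, cdev_pager_free_page() above can now unmap every
         * mapping by walking m->md.pv_list (previously skipped): */
        pmap_page_protect(m, VM_PROT_NONE);
        vm_page_remove(m);
        vm_page_wakeup(m);

(OBJT_DEVICE objects instead keep fake pages on devp_pglist and release them
with dev_pager_putfake().)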
diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h
index eeec581..99f3451 100644
@@ -167,6 +167,7 @@ void                 pmap_object_init_pt (pmap_t pmap, vm_offset_t addr,
                    vm_offset_t size, int pagelimit);
 boolean_t       pmap_page_exists_quick (pmap_t pmap, struct vm_page *m);
 void            pmap_page_protect (struct vm_page *m, vm_prot_t prot);
+void            pmap_page_init (struct vm_page *m);
 vm_paddr_t      pmap_phys_address (vm_pindex_t);
 void            pmap_pinit (pmap_t);
 void            pmap_puninit (pmap_t);
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 4acade1..42ddcd0 100644
@@ -528,6 +528,12 @@ rb_vm_page_compare(struct vm_page *p1, struct vm_page *p2)
        return(0);
 }
 
+void
+vm_page_init(vm_page_t m)
+{
+       /* do nothing for now.  Called from pmap_page_init() */
+}
+
 /*
  * Each page queue has its own spin lock, which is fairly optimal for
  * allocating and freeing pages at least.
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index 5decae7..ba35883 100644
@@ -431,6 +431,7 @@ void vm_page_queue_spin_unlock(vm_page_t);
 void vm_page_queues_spin_unlock(u_short);
 void vm_page_and_queue_spin_unlock(vm_page_t m);
 
+void vm_page_init(vm_page_t m);
 void vm_page_io_finish(vm_page_t m);
 void vm_page_io_start(vm_page_t m);
 void vm_page_need_commit(vm_page_t m);