kernel - MPSAFE work - Remove unused code
author: Matthew Dillon <dillon@apollo.backplane.com>
Wed, 9 Jun 2010 22:27:49 +0000 (15:27 -0700)
committer: Matthew Dillon <dillon@apollo.backplane.com>
Wed, 9 Jun 2010 22:27:49 +0000 (15:27 -0700)
* Remove pmap_qenter2()
* Remove pmap_pvdump() and related helper code

sys/platform/pc32/i386/pmap.c
sys/platform/pc64/include/pmap.h
sys/platform/pc64/x86_64/pmap.c
sys/platform/vkernel/platform/pmap.c
sys/platform/vkernel64/platform/pmap.c
sys/vm/pmap.h

index bbac355..df5b8b0 100644 (file)
@@ -886,40 +886,6 @@ pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
 #endif
 }
 
-void
-pmap_qenter2(vm_offset_t va, vm_page_t *m, int count, cpumask_t *mask)
-{
-       vm_offset_t end_va;
-       cpumask_t cmask = mycpu->gd_cpumask;
-
-       end_va = va + count * PAGE_SIZE;
-
-       while (va < end_va) {
-               unsigned *pte;
-               unsigned pteval;
-
-               /*
-                * Install the new PTE.  If the pte changed from the prior
-                * mapping we must reset the cpu mask and invalidate the page.
-                * If the pte is the same but we have not seen it on the
-                * current cpu, invlpg the existing mapping.  Otherwise the
-                * entry is optimal and no invalidation is required.
-                */
-               pte = (unsigned *)vtopte(va);
-               pteval = VM_PAGE_TO_PHYS(*m) | PG_A | PG_RW | PG_V | pgeflag;
-               if (*pte != pteval) {
-                       *mask = 0;
-                       *pte = pteval;
-                       cpu_invlpg((void *)va);
-               } else if ((*mask & cmask) == 0) {
-                       cpu_invlpg((void *)va);
-               }
-               va += PAGE_SIZE;
-               m++;
-       }
-       *mask |= cmask;
-}
-
 /*
  * This routine jerks page mappings from the
  * kernel -- it is meant only for temporary mappings.
@@ -3481,57 +3447,3 @@ pmap_get_pgeflag(void)
 {
        return pgeflag;
 }
-
-#if defined(DEBUG)
-
-static void    pads (pmap_t pm);
-void           pmap_pvdump (vm_paddr_t pa);
-
-/* print address space of pmap*/
-static void
-pads(pmap_t pm)
-{
-       unsigned va, i, j;
-       unsigned *ptep;
-
-       if (pm == &kernel_pmap)
-               return;
-       crit_enter();
-       for (i = 0; i < 1024; i++) {
-               if (pm->pm_pdir[i]) {
-                       for (j = 0; j < 1024; j++) {
-                               va = (i << PDRSHIFT) + (j << PAGE_SHIFT);
-                               if (pm == &kernel_pmap && va < KERNBASE)
-                                       continue;
-                               if (pm != &kernel_pmap && va > UPT_MAX_ADDRESS)
-                                       continue;
-                               ptep = pmap_pte_quick(pm, va);
-                               if (pmap_pte_v(ptep))
-                                       kprintf("%x:%x ", va, *(int *) ptep);
-                       };
-               }
-       }
-       crit_exit();
-
-}
-
-void
-pmap_pvdump(vm_paddr_t pa)
-{
-       pv_entry_t pv;
-       vm_page_t m;
-
-       kprintf("pa %08llx", (long long)pa);
-       m = PHYS_TO_VM_PAGE(pa);
-       TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
-#ifdef used_to_be
-               kprintf(" -> pmap %p, va %p, flags %x",
-                   (void *)pv->pv_pmap, (long)pv->pv_va, pv->pv_flags);
-#endif
-               kprintf(" -> pmap %p, va %p",
-                       (void *)pv->pv_pmap, (void *)pv->pv_va);
-               pads(pv->pv_pmap);
-       }
-       kprintf(" ");
-}
-#endif
index e85eabb..635b5e3 100644 (file)
@@ -261,9 +261,6 @@ void        pmap_bootstrap (vm_paddr_t *);
 void   *pmap_mapdev (vm_paddr_t, vm_size_t);
 void   *pmap_mapdev_uncacheable(vm_paddr_t, vm_size_t);
 void   pmap_unmapdev (vm_offset_t, vm_size_t);
-#if JG
-pt_entry_t *pmap_pte (pmap_t, vm_offset_t) __pure2;
-#endif
 struct vm_page *pmap_use_pt (pmap_t, vm_offset_t);
 #ifdef SMP
 void   pmap_set_opt (void);
index 7c45c0d..9d00902 100644 (file)
@@ -1055,40 +1055,6 @@ pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
 #endif
 }
 
-void
-pmap_qenter2(vm_offset_t va, vm_page_t *m, int count, cpumask_t *mask)
-{
-       vm_offset_t end_va;
-       cpumask_t cmask = mycpu->gd_cpumask;
-
-       end_va = va + count * PAGE_SIZE;
-
-       while (va < end_va) {
-               pt_entry_t *pte;
-               pt_entry_t pteval;
-
-               /*
-                * Install the new PTE.  If the pte changed from the prior
-                * mapping we must reset the cpu mask and invalidate the page.
-                * If the pte is the same but we have not seen it on the
-                * current cpu, invlpg the existing mapping.  Otherwise the
-                * entry is optimal and no invalidation is required.
-                */
-               pte = vtopte(va);
-               pteval = VM_PAGE_TO_PHYS(*m) | PG_A | PG_RW | PG_V | pgeflag;
-               if (*pte != pteval) {
-                       *mask = 0;
-                       *pte = pteval;
-                       cpu_invlpg((void *)va);
-               } else if ((*mask & cmask) == 0) {
-                       cpu_invlpg((void *)va);
-               }
-               va += PAGE_SIZE;
-               m++;
-       }
-       *mask |= cmask;
-}
-
 /*
  * This routine jerks page mappings from the
  * kernel -- it is meant only for temporary mappings.
@@ -3811,48 +3777,3 @@ pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
        addr = (addr + (NBPDR - 1)) & ~(NBPDR - 1);
        return addr;
 }
-
-
-#if defined(DEBUG)
-
-static void    pads (pmap_t pm);
-void           pmap_pvdump (vm_paddr_t pa);
-
-/* print address space of pmap*/
-static
-void
-pads(pmap_t pm)
-{
-       vm_offset_t va;
-       unsigned i, j;
-       pt_entry_t *ptep;
-
-       if (pm == &kernel_pmap)
-               return;
-       crit_enter();
-       for (i = 0; i < NPDEPG; i++) {
-               ;
-       }
-       crit_exit();
-
-}
-
-void
-pmap_pvdump(vm_paddr_t pa)
-{
-       pv_entry_t pv;
-       vm_page_t m;
-
-       kprintf("pa %08llx", (long long)pa);
-       m = PHYS_TO_VM_PAGE(pa);
-       TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
-#ifdef used_to_be
-               kprintf(" -> pmap %p, va %x, flags %x",
-                   (void *)pv->pv_pmap, pv->pv_va, pv->pv_flags);
-#endif
-               kprintf(" -> pmap %p, va %x", (void *)pv->pv_pmap, pv->pv_va);
-               pads(pv->pv_pmap);
-       }
-       kprintf(" ");
-}
-#endif
index b526a64..9374dd3 100644 (file)
@@ -809,38 +809,6 @@ pmap_qenter(vm_offset_t va, struct vm_page **m, int count)
        }
 }
 
-/*
- * Map a set of VM pages to kernel virtual memory.  If a mapping changes
- * clear the supplied mask.  The caller handles any SMP interactions.
- * The mask is used to provide the caller with hints on what SMP interactions
- * might be needed.
- */
-void
-pmap_qenter2(vm_offset_t va, struct vm_page **m, int count, cpumask_t *mask)
-{
-       cpumask_t cmask = mycpu->gd_cpumask;
-
-       KKASSERT(va >= KvaStart && va + count * PAGE_SIZE < KvaEnd);
-       while (count) {
-               vpte_t *ptep;
-               vpte_t npte;
-
-               ptep = KernelPTA + (va >> PAGE_SHIFT);
-               npte = (vpte_t)(*m)->phys_addr | VPTE_R | VPTE_W | VPTE_V;
-               if (*ptep != npte) {
-                       *mask = 0;
-                       pmap_inval_pte_quick(ptep, &kernel_pmap, va);
-                       *ptep = npte;
-               } else if ((*mask & cmask) == 0) {
-                       pmap_kenter_sync_quick(va);
-               }
-               --count;
-               ++m;
-               va += PAGE_SIZE;
-       }
-       *mask |= cmask;
-}
-
 /*
  * Undo the effects of pmap_qenter*().
  */
@@ -2985,57 +2953,3 @@ pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
        return addr;
 }
 
-
-#if defined(DEBUG)
-
-static void    pads (pmap_t pm);
-void           pmap_pvdump (vm_paddr_t pa);
-
-/* print address space of pmap*/
-static void
-pads(pmap_t pm)
-{
-       vm_offset_t va;
-       int i, j;
-       vpte_t *ptep;
-
-       if (pm == &kernel_pmap)
-               return;
-       for (i = 0; i < 1024; i++) {
-               if (pm->pm_pdir[i]) {
-                       for (j = 0; j < 1024; j++) {
-                               va = (i << PDRSHIFT) + (j << PAGE_SHIFT);
-                               if (pm == &kernel_pmap && va < KERNBASE)
-                                       continue;
-                               if (pm != &kernel_pmap && va > UPT_MAX_ADDRESS)
-                                       continue;
-                               ptep = pmap_pte(pm, va);
-                               if (ptep && (*ptep & VPTE_V)) {
-                                       kprintf("%p:%x ",
-                                               (void *)va, (unsigned)*ptep);
-                               }
-                       };
-               }
-       }
-}
-
-void
-pmap_pvdump(vm_paddr_t pa)
-{
-       pv_entry_t pv;
-       vm_page_t m;
-
-       kprintf("pa %08llx", (long long)pa);
-       m = PHYS_TO_VM_PAGE(pa);
-       TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
-#ifdef used_to_be
-               kprintf(" -> pmap %p, va %x, flags %x",
-                   (void *)pv->pv_pmap, pv->pv_va, pv->pv_flags);
-#endif
-               kprintf(" -> pmap %p, va %x", (void *)pv->pv_pmap, pv->pv_va);
-               pads(pv->pv_pmap);
-       }
-       kprintf(" ");
-}
-#endif
-
index 21eaa72..c1eb456 100644 (file)
@@ -835,40 +835,6 @@ pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
        }
 }
 
-/*
- * Map a set of VM pages to kernel virtual memory.  If a mapping changes
- * clear the supplied mask.  The caller handles any SMP interactions.
- * The mask is used to provide the caller with hints on what SMP interactions
- * might be needed.
- */
-void
-pmap_qenter2(vm_offset_t va, vm_page_t *m, int count, cpumask_t *mask)
-{
-       vm_offset_t end_va;
-       cpumask_t cmask = mycpu->gd_cpumask;
-
-       end_va = va + count * PAGE_SIZE;
-       KKASSERT(va >= KvaStart && end_va < KvaEnd);
-
-       while (va < end_va) {
-               pt_entry_t *pte;
-               pt_entry_t pteval;
-
-               pte = vtopte(va);
-               pteval = VM_PAGE_TO_PHYS(*m) | VPTE_R | VPTE_W | VPTE_V;
-               if (*pte != pteval) {
-                       *mask = 0;
-                       pmap_inval_pte_quick(pte, &kernel_pmap, va);
-                       *pte = pteval;
-               } else if ((*mask & cmask) == 0) {
-                       pmap_kenter_sync_quick(va);
-               }
-               va += PAGE_SIZE;
-               m++;
-       }
-       *mask |= cmask;
-}
-
 /*
  * Undo the effects of pmap_qenter*().
  */
@@ -3218,60 +3184,3 @@ pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
        addr = (addr + (NBPDR - 1)) & ~(NBPDR - 1);
        return addr;
 }
-
-
-#if defined(DEBUG)
-
-static void    pads (pmap_t pm);
-void           pmap_pvdump (vm_paddr_t pa);
-
-/* print address space of pmap*/
-static void
-pads(pmap_t pm)
-{
-       vm_offset_t va;
-       unsigned i, j;
-       pt_entry_t *ptep;
-
-       if (pm == &kernel_pmap)
-               return;
-       crit_enter();
-       for (i = 0; i < NPDEPG; i++) {
-#if JGPMAP32
-               if (pm->pm_pdir[i]) {
-                       for (j = 0; j < NPTEPG; j++) {
-                               va = (i << PDRSHIFT) + (j << PAGE_SHIFT);
-                               if (pm == &kernel_pmap && va < KERNBASE)
-                                       continue;
-                               if (pm != &kernel_pmap && va > UPT_MAX_ADDRESS)
-                                       continue;
-                               ptep = pmap_pte_quick(pm, va);
-                               if (pmap_pte_v(ptep))
-                                       kprintf("%lx:%lx ", va, *ptep);
-                       };
-               }
-#endif
-       }
-       crit_exit();
-
-}
-
-void
-pmap_pvdump(vm_paddr_t pa)
-{
-       pv_entry_t pv;
-       vm_page_t m;
-
-       kprintf("pa %08llx", (long long)pa);
-       m = PHYS_TO_VM_PAGE(pa);
-       TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
-#ifdef used_to_be
-               kprintf(" -> pmap %p, va %x, flags %x",
-                   (void *)pv->pv_pmap, pv->pv_va, pv->pv_flags);
-#endif
-               kprintf(" -> pmap %p, va %lx", (void *)pv->pv_pmap, pv->pv_va);
-               pads(pv->pv_pmap);
-       }
-       kprintf(" ");
-}
-#endif
index e360186..c2e2154 100644 (file)
@@ -169,8 +169,6 @@ void                 pmap_pinit0 (pmap_t);
 void            pmap_pinit2 (pmap_t);
 void            pmap_protect (pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
 void            pmap_qenter (vm_offset_t, struct vm_page **, int);
-void            pmap_qenter2 (vm_offset_t, struct vm_page **, int,
-                   cpumask_t *);
 void            pmap_qremove (vm_offset_t, int);
 void            pmap_kenter (vm_offset_t, vm_paddr_t);
 void            pmap_kenter_quick (vm_offset_t, vm_paddr_t);