kernel - Refactor phys_avail[] and dump_avail[]
author     Matthew Dillon <dillon@apollo.backplane.com>
Fri, 6 Jan 2017 00:33:49 +0000 (16:33 -0800)
committer  Matthew Dillon <dillon@apollo.backplane.com>
Fri, 6 Jan 2017 00:33:49 +0000 (16:33 -0800)
* Refactor phys_avail[] and dump_avail[] into a more understandable
  structure.  Instead of flat vm_paddr_t arrays holding implicit
  {start, end} pairs at even/odd indices, both arrays are now arrays of
  vm_phystable_t, whose entries carry explicit phys_beg and phys_end
  fields (plus flags and affinity), with an all-zero entry terminating
  the list.
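
  The core of the change is the vm_phystable_t introduced in sys/vm/pmap.h:
  callers that used to walk a flat vm_paddr_t array two entries at a time
  (start at the even index, end at the odd index) now walk one struct per
  range and stop at a zeroed entry.  Below is a minimal, standalone sketch
  of the new iteration idiom; the struct layout and the termination
  convention come from the hunks in this commit, while the uint64_t
  stand-in for vm_paddr_t and the sample table contents are invented
  purely for illustration.

    /*
     * Standalone illustration only -- not kernel code.  vm_paddr_t is
     * stood in by uint64_t and the table contents are made up.
     */
    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t vm_paddr_t;            /* stand-in for the kernel type */

    /* Layout added to sys/vm/pmap.h by this commit. */
    typedef struct vm_phystable {
            vm_paddr_t      phys_beg;       /* start of range (inclusive) */
            vm_paddr_t      phys_end;       /* end of range (exclusive) */
            uint32_t        flags;
            uint32_t        affinity;
    } vm_phystable_t;

    /* Example table; an all-zero entry still terminates the list. */
    static vm_phystable_t phys_avail[] = {
            { 0x0000000000001000ULL, 0x000000000009f000ULL, 0, 0 },
            { 0x0000000000100000ULL, 0x00000000bfe00000ULL, 0, 0 },
            { 0, 0, 0, 0 },
    };

    int
    main(void)
    {
            int indx;

            /*
             * Old idiom:  for (indx = 0; phys_avail[indx + 1] != 0; indx += 2)
             *             with phys_avail[indx] the start and
             *             phys_avail[indx + 1] the end of each range.
             * New idiom:  one entry per range, terminated by phys_end == 0.
             */
            for (indx = 0; phys_avail[indx].phys_end != 0; ++indx) {
                    vm_paddr_t size = phys_avail[indx].phys_end -
                                      phys_avail[indx].phys_beg;

                    printf("0x%016jx - 0x%016jx, %ju bytes\n",
                           (uintmax_t)phys_avail[indx].phys_beg,
                           (uintmax_t)phys_avail[indx].phys_end - 1,
                           (uintmax_t)size);
            }
            return (0);
    }

  The flags and affinity fields are carried in the new struct but are not
  yet consumed by the hunks shown here.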

sys/dev/drm/linux_iomapping.c
sys/platform/pc64/include/pmap.h
sys/platform/pc64/include/vmparam.h
sys/platform/pc64/x86_64/dump_machdep.c
sys/platform/pc64/x86_64/machdep.c
sys/platform/pc64/x86_64/minidump_machdep.c
sys/platform/pc64/x86_64/nexus.c
sys/platform/vkernel64/platform/init.c
sys/vm/pmap.h
sys/vm/vm_page.c

diff --git a/sys/dev/drm/linux_iomapping.c b/sys/dev/drm/linux_iomapping.c
index b8c6923..64b2779 100644
@@ -75,9 +75,10 @@ void iounmap(void __iomem *ptr)
 
        paddr_end = imp->paddr + (imp->npages * PAGE_SIZE) - 1;
        /* Is this address space range backed by regular memory ? */
-       for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
-               vm_paddr_t range_start = phys_avail[indx];
-               vm_paddr_t size = phys_avail[indx + 1] - phys_avail[indx];
+       for (indx = 0; phys_avail[indx].phys_end != 0; ++indx) {
+               vm_paddr_t range_start = phys_avail[indx].phys_beg;
+               vm_paddr_t size = phys_avail[indx].phys_end -
+                                 phys_avail[indx].phys_beg;
                vm_paddr_t range_end = range_start + size - 1;
 
                if ((imp->paddr >= range_start) && (paddr_end <= range_end)) {
diff --git a/sys/platform/pc64/include/pmap.h b/sys/platform/pc64/include/pmap.h
index dda83d9..b75bfe5 100644
@@ -343,7 +343,6 @@ typedef struct pv_entry {
 
 extern caddr_t CADDR1;
 extern pt_entry_t *CMAP1;
-extern vm_paddr_t dump_avail[];
 extern vm_paddr_t avail_end;
 extern vm_paddr_t avail_start;
 extern vm_offset_t clean_eva;
diff --git a/sys/platform/pc64/include/vmparam.h b/sys/platform/pc64/include/vmparam.h
index 0638c2d..113fe32 100644
@@ -93,7 +93,7 @@
  * largest physical address that is accessible by ISA DMA is split
  * into two PHYSSEG entries. 
  */
-#define        VM_PHYSSEG_MAX          31
+#define        VM_PHYSSEG_MAX          127
 
 /*
  * Virtual addresses of things.  Derived from the page directory and
diff --git a/sys/platform/pc64/x86_64/dump_machdep.c b/sys/platform/pc64/x86_64/dump_machdep.c
index baa7dd0..fc3b210 100644
@@ -40,6 +40,7 @@
 #include <machine/elf.h>
 #include <machine/md_var.h>
 #include <machine/thread.h>
+#include <machine/vmparam.h>
 #include <sys/thread2.h>
 
 CTASSERT(sizeof(struct kerneldumpheader) == 512);
@@ -72,21 +73,20 @@ static off_t dumplo, fileofs;
 static char buffer[DEV_BSIZE];
 static size_t fragsz;
 
-/* 20 phys_avail entry pairs correspond to 10 md_pa's */
-static struct md_pa dump_map[10];
+static struct md_pa dump_map[VM_PHYSSEG_MAX+1];
 
 static void
 md_pa_init(void)
 {
-       int n, idx;
+       int n;
 
        bzero(dump_map, sizeof(dump_map));
        for (n = 0; n < NELEM(dump_map); n++) {
-               idx = n * 2;
-               if (dump_avail[idx] == 0 && dump_avail[idx + 1] == 0)
+               if (dump_avail[n].phys_beg == 0 && dump_avail[n].phys_end == 0)
                        break;
-               dump_map[n].md_start = dump_avail[idx];
-               dump_map[n].md_size = dump_avail[idx + 1] - dump_avail[idx];
+               dump_map[n].md_start = dump_avail[n].phys_beg;
+               dump_map[n].md_size = dump_avail[n].phys_end -
+                                     dump_avail[n].phys_beg;
        }
 }
 
diff --git a/sys/platform/pc64/x86_64/machdep.c b/sys/platform/pc64/x86_64/machdep.c
index 6a2f8ac..947a4b5 100644
@@ -302,14 +302,12 @@ vm_paddr_t Realmem;
  * physical address that is accessible by ISA DMA is split into two
  * PHYSSEG entries.
  */
-#define        PHYSMAP_SIZE    (2 * (VM_PHYSSEG_MAX - 1))
+vm_phystable_t phys_avail[VM_PHYSSEG_MAX + 1];
+vm_phystable_t dump_avail[VM_PHYSSEG_MAX + 1];
 
-vm_paddr_t phys_avail[PHYSMAP_SIZE + 2];
-vm_paddr_t dump_avail[PHYSMAP_SIZE + 2];
-
-/* must be 2 less so 0 0 can signal end of chunks */
-#define PHYS_AVAIL_ARRAY_END (NELEM(phys_avail) - 2)
-#define DUMP_AVAIL_ARRAY_END (NELEM(dump_avail) - 2)
+/* must be 1 less so 0 0 can signal end of chunks */
+#define PHYS_AVAIL_ARRAY_END (NELEM(phys_avail) - 1)
+#define DUMP_AVAIL_ARRAY_END (NELEM(dump_avail) - 1)
 
 static vm_offset_t buffer_sva, buffer_eva;
 vm_offset_t clean_sva, clean_eva;
@@ -340,12 +338,15 @@ cpu_startup(void *dummy)
                int indx;
 
                kprintf("Physical memory chunk(s):\n");
-               for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
-                       vm_paddr_t size1 = phys_avail[indx + 1] - phys_avail[indx];
+               for (indx = 0; phys_avail[indx].phys_end != 0; ++indx) {
+                       vm_paddr_t size1;
+
+                       size1 = phys_avail[indx].phys_end -
+                               phys_avail[indx].phys_beg;
 
                        kprintf("0x%08jx - 0x%08jx, %ju bytes (%ju pages)\n",
-                               (intmax_t)phys_avail[indx],
-                               (intmax_t)phys_avail[indx + 1] - 1,
+                               (intmax_t)phys_avail[indx].phys_beg,
+                               (intmax_t)phys_avail[indx].phys_end - 1,
                                (intmax_t)size1,
                                (intmax_t)(size1 / PAGE_SIZE));
                }
@@ -1652,10 +1653,13 @@ ssdtosyssd(struct soft_segment_descriptor *ssd,
 
 #define PHYSMAP_ALIGN          (vm_paddr_t)(128 * 1024)
 #define PHYSMAP_ALIGN_MASK     (vm_paddr_t)(PHYSMAP_ALIGN - 1)
-       vm_paddr_t physmap[PHYSMAP_SIZE];
-       struct bios_smap *smapbase, *smap, *smapend;
-       struct efi_map_header *efihdrbase;
-       u_int32_t smapsize;
+#define PHYSMAP_SIZE           VM_PHYSSEG_MAX
+
+vm_paddr_t physmap[PHYSMAP_SIZE];
+struct bios_smap *smapbase, *smap, *smapend;
+struct efi_map_header *efihdrbase;
+u_int32_t smapsize;
+
 #define PHYSMAP_HANDWAVE       (vm_paddr_t)(2 * 1024 * 1024)
 #define PHYSMAP_HANDWAVE_MASK  (PHYSMAP_HANDWAVE - 1)
 
@@ -1906,9 +1910,9 @@ getmemsize(caddr_t kmdp, u_int64_t first)
         * ie: an int32_t immediately precedes smap.
         */
        efihdrbase = (struct efi_map_header *)preload_search_info(kmdp,
-           MODINFO_METADATA | MODINFOMD_EFI_MAP);
+                    MODINFO_METADATA | MODINFOMD_EFI_MAP);
        smapbase = (struct bios_smap *)preload_search_info(kmdp,
-           MODINFO_METADATA | MODINFOMD_SMAP);
+                  MODINFO_METADATA | MODINFOMD_SMAP);
        if (smapbase == NULL && efihdrbase == NULL)
                panic("No BIOS smap or EFI map info from loader!");
 
@@ -1996,10 +2000,11 @@ getmemsize(caddr_t kmdp, u_int64_t first)
         * Size up each available chunk of physical memory.
         */
        pa_indx = 0;
-       da_indx = 1;
-       phys_avail[pa_indx++] = physmap[0];
-       phys_avail[pa_indx] = physmap[0];
-       dump_avail[da_indx] = physmap[0];
+       da_indx = 0;
+       phys_avail[pa_indx].phys_beg = physmap[0];
+       phys_avail[pa_indx].phys_end = physmap[0];
+       dump_avail[da_indx].phys_beg = physmap[0];
+       dump_avail[da_indx].phys_end = physmap[0];
        pte = CMAP1;
 
        /*
@@ -2122,32 +2127,32 @@ handwaved:
                         * so that we keep going. The first bad page
                         * will terminate the loop.
                         */
-                       if (phys_avail[pa_indx] == pa) {
-                               phys_avail[pa_indx] += incr;
+                       if (phys_avail[pa_indx].phys_end == pa) {
+                               phys_avail[pa_indx].phys_end += incr;
                        } else {
-                               pa_indx++;
+                               ++pa_indx;
                                if (pa_indx == PHYS_AVAIL_ARRAY_END) {
                                        kprintf(
                "Too many holes in the physical address space, giving up\n");
-                                       pa_indx--;
+                                       --pa_indx;
                                        full = TRUE;
                                        goto do_dump_avail;
                                }
-                               phys_avail[pa_indx++] = pa;
-                               phys_avail[pa_indx] = pa + incr;
+                               phys_avail[pa_indx].phys_beg = pa;
+                               phys_avail[pa_indx].phys_end = pa + incr;
                        }
                        physmem += incr / PAGE_SIZE;
 do_dump_avail:
-                       if (dump_avail[da_indx] == pa) {
-                               dump_avail[da_indx] += incr;
+                       if (dump_avail[da_indx].phys_end == pa) {
+                               dump_avail[da_indx].phys_end += incr;
                        } else {
-                               da_indx++;
+                               ++da_indx;
                                if (da_indx == DUMP_AVAIL_ARRAY_END) {
-                                       da_indx--;
+                                       --da_indx;
                                        goto do_next;
                                }
-                               dump_avail[da_indx++] = pa;
-                               dump_avail[da_indx] = pa + incr;
+                               dump_avail[da_indx].phys_beg = pa;
+                               dump_avail[da_indx].phys_end = pa + incr;
                        }
 do_next:
                        if (full)
@@ -2165,24 +2170,25 @@ do_next:
         */
        msgbuf_size = (MSGBUF_SIZE + PHYSMAP_ALIGN_MASK) & ~PHYSMAP_ALIGN_MASK;
 
-       while (phys_avail[pa_indx - 1] + PHYSMAP_ALIGN +
-              msgbuf_size >= phys_avail[pa_indx]) {
-               physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
-               phys_avail[pa_indx--] = 0;
-               phys_avail[pa_indx--] = 0;
+       while (phys_avail[pa_indx].phys_beg + PHYSMAP_ALIGN + msgbuf_size >=
+              phys_avail[pa_indx].phys_end) {
+               physmem -= atop(phys_avail[pa_indx].phys_end -
+                               phys_avail[pa_indx].phys_beg);
+               phys_avail[pa_indx].phys_beg = 0;
+               phys_avail[pa_indx].phys_end = 0;
+               --pa_indx;
        }
 
-       Maxmem = atop(phys_avail[pa_indx]);
+       Maxmem = atop(phys_avail[pa_indx].phys_end);
 
        /* Trim off space for the message buffer. */
-       phys_avail[pa_indx] -= msgbuf_size;
+       phys_avail[pa_indx].phys_end -= msgbuf_size;
 
-       avail_end = phys_avail[pa_indx];
+       avail_end = phys_avail[pa_indx].phys_end;
 
        /* Map the message buffer. */
        for (off = 0; off < msgbuf_size; off += PAGE_SIZE) {
-               pmap_kenter((vm_offset_t)msgbufp + off,
-                           phys_avail[pa_indx] + off);
+               pmap_kenter((vm_offset_t)msgbufp + off, avail_end + off);
        }
        /* Try to get EFI framebuffer working as early as possible */
        if (have_efi_framebuffer)
diff --git a/sys/platform/pc64/x86_64/minidump_machdep.c b/sys/platform/pc64/x86_64/minidump_machdep.c
index 33164b0..9c214c5 100644
@@ -77,8 +77,8 @@ is_dumpable(vm_paddr_t pa)
 {
        int i;
 
-       for (i = 0; dump_avail[i] != 0 || dump_avail[i + 1] != 0; i += 2) {
-               if (pa >= dump_avail[i] && pa < dump_avail[i + 1])
+       for (i = 0; dump_avail[i].phys_beg || dump_avail[i].phys_end; ++i) {
+               if (pa >= dump_avail[i].phys_beg && pa < dump_avail[i].phys_end)
                        return (1);
        }
        return (0);
diff --git a/sys/platform/pc64/x86_64/nexus.c b/sys/platform/pc64/x86_64/nexus.c
index 058959d..0a86399 100644
@@ -656,7 +656,7 @@ ram_attach(device_t dev)
 {
        struct bios_smap *smapbase, *smap, *smapend;
        struct resource *res;
-       vm_paddr_t *p;
+       vm_phystable_t *p;
        caddr_t kmdp;
        uint32_t smapsize;
        int error, rid;
@@ -710,16 +710,18 @@ ram_attach(device_t dev)
         * instead of the start since the start address for the first
         * segment is 0.
         */
-       for (rid = 0, p = dump_avail; p[1] != 0; rid++, p += 2) {
-               error = bus_set_resource(dev, SYS_RES_MEMORY, rid, p[0],
-                   p[1] - p[0], -1);
+       for (rid = 0, p = &dump_avail[0]; p->phys_end; ++rid, ++p) {
+               error = bus_set_resource(dev, SYS_RES_MEMORY, rid,
+                                        p->phys_beg,
+                                        p->phys_end - p->phys_beg,
+                                        -1);
                if (error)
                        panic("%s: resource %d failed set with %d", __func__,
-                           rid, error);
+                             rid, error);
                res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 0);
                if (res == NULL)
                        panic("%s: resource %d failed to attach", __func__,
-                           rid);
+                             rid);
        }
        return (0);
 }
diff --git a/sys/platform/vkernel64/platform/init.c b/sys/platform/vkernel64/platform/init.c
index a3b2f34..bab7dd6 100644
@@ -84,7 +84,7 @@
 
 #define EX_VKERNEL_REBOOT      32
 
-vm_paddr_t phys_avail[16];
+vm_phystable_t phys_avail[16];
 vm_paddr_t Maxmem;
 vm_paddr_t Maxmem_bytes;
 long physmem;
@@ -635,9 +635,10 @@ init_kern_memory(void)
         * phys_avail[] represents unallocated physical memory.  MI code
         * will use phys_avail[] to create the vm_page array.
         */
-       phys_avail[0] = (vm_paddr_t)firstfree;
-       phys_avail[0] = (phys_avail[0] + PAGE_MASK) & ~(vm_paddr_t)PAGE_MASK;
-       phys_avail[1] = Maxmem_bytes;
+       phys_avail[0].phys_beg = (vm_paddr_t)firstfree;
+       phys_avail[0].phys_beg = (phys_avail[0].phys_beg + PAGE_MASK) &
+                                ~(vm_paddr_t)PAGE_MASK;
+       phys_avail[0].phys_end = Maxmem_bytes;
 
 #if JGV
        /*
@@ -659,9 +660,9 @@ init_kern_memory(void)
         */
        proc0paddr = (void *)virtual_start;
        for (i = 0; i < UPAGES; ++i) {
-               pmap_kenter_quick(virtual_start, phys_avail[0]);
+               pmap_kenter_quick(virtual_start, phys_avail[0].phys_beg);
                virtual_start += PAGE_SIZE;
-               phys_avail[0] += PAGE_SIZE;
+               phys_avail[0].phys_beg += PAGE_SIZE;
        }
 
        /*
@@ -676,9 +677,9 @@ init_kern_memory(void)
        assert((MSGBUF_SIZE & PAGE_MASK) == 0);
        msgbufp = (void *)virtual_start;
        for (i = 0; i < (MSGBUF_SIZE >> PAGE_SHIFT); ++i) {
-               pmap_kenter_quick(virtual_start, phys_avail[0]);
+               pmap_kenter_quick(virtual_start, phys_avail[0].phys_beg);
                virtual_start += PAGE_SIZE;
-               phys_avail[0] += PAGE_SIZE;
+               phys_avail[0].phys_beg += PAGE_SIZE;
        }
        msgbufinit(msgbufp, MSGBUF_SIZE);
 
@@ -761,9 +762,10 @@ init_kern_memory_vmm(void)
         * phys_avail[] represents unallocated physical memory.  MI code
         * will use phys_avail[] to create the vm_page array.
         */
-       phys_avail[0] = (vm_paddr_t)firstfree;
-       phys_avail[0] = (phys_avail[0] + PAGE_MASK) & ~(vm_paddr_t)PAGE_MASK;
-       phys_avail[1] = (vm_paddr_t)dmap_address + Maxmem_bytes;
+       phys_avail[0].phys_beg = (vm_paddr_t)firstfree;
+       phys_avail[0].phys_beg = (phys_avail[0].phys_beg + PAGE_MASK) &
+                                ~(vm_paddr_t)PAGE_MASK;
+       phys_avail[0].phys_end = (vm_paddr_t)dmap_address + Maxmem_bytes;
 
        /*
         * pmap_growkernel() will set the correct value.
@@ -775,9 +777,9 @@ init_kern_memory_vmm(void)
         */
        proc0paddr = (void *)virtual_start;
        for (i = 0; i < UPAGES; ++i) {
-               pmap_kenter_quick(virtual_start, phys_avail[0]);
+               pmap_kenter_quick(virtual_start, phys_avail[0].phys_beg);
                virtual_start += PAGE_SIZE;
-               phys_avail[0] += PAGE_SIZE;
+               phys_avail[0].phys_beg += PAGE_SIZE;
        }
 
        /*
@@ -793,9 +795,9 @@ init_kern_memory_vmm(void)
        msgbufp = (void *)virtual_start;
        for (i = 0; i < (MSGBUF_SIZE >> PAGE_SHIFT); ++i) {
 
-               pmap_kenter_quick(virtual_start, phys_avail[0]);
+               pmap_kenter_quick(virtual_start, phys_avail[0].phys_beg);
                virtual_start += PAGE_SIZE;
-               phys_avail[0] += PAGE_SIZE;
+               phys_avail[0].phys_beg += PAGE_SIZE;
        }
 
        msgbufinit(msgbufp, MSGBUF_SIZE);
@@ -837,16 +839,16 @@ init_globaldata(void)
         * into KVA.  For cpu #0 only.
         */
        for (i = 0; i < sizeof(struct mdglobaldata); i += PAGE_SIZE) {
-               pa = phys_avail[0];
+               pa = phys_avail[0].phys_beg;
                va = (vm_offset_t)&CPU_prvspace[0].mdglobaldata + i;
                pmap_kenter_quick(va, pa);
-               phys_avail[0] += PAGE_SIZE;
+               phys_avail[0].phys_beg += PAGE_SIZE;
        }
        for (i = 0; i < sizeof(CPU_prvspace[0].idlestack); i += PAGE_SIZE) {
-               pa = phys_avail[0];
+               pa = phys_avail[0].phys_beg;
                va = (vm_offset_t)&CPU_prvspace[0].idlestack + i;
                pmap_kenter_quick(va, pa);
-               phys_avail[0] += PAGE_SIZE;
+               phys_avail[0].phys_beg += PAGE_SIZE;
        }
 
        /*
@@ -913,7 +915,8 @@ init_vkernel(void)
 #if 0
        initializecpu();        /* Initialize CPU registers */
 #endif
-       init_param2((phys_avail[1] - phys_avail[0]) / PAGE_SIZE);
+       init_param2((phys_avail[0].phys_end -
+                    phys_avail[0].phys_beg) / PAGE_SIZE);
 
 #if 0
        /*
diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h
index b0b2d14..f30b7a4 100644
@@ -108,6 +108,13 @@ struct pmap_pgscan_info {
                                    struct vm_page *);
 };
 
+typedef struct vm_phystable {
+       vm_paddr_t      phys_beg;
+       vm_paddr_t      phys_end;
+       uint32_t        flags;
+       uint32_t        affinity;
+} vm_phystable_t;
+
 /*
  * Most of these variables represent parameters set up by low level MD kernel
  * boot code to be used by higher level MI initialization code to identify
@@ -134,7 +141,8 @@ extern vm_offset_t virtual_start;
 extern vm_offset_t virtual_end;
 extern vm_offset_t virtual2_start;
 extern vm_offset_t virtual2_end;
-extern vm_paddr_t phys_avail[];        
+extern vm_phystable_t phys_avail[];
+extern vm_phystable_t dump_avail[];
 
 /*
  * High-level pmap scan
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 53baaeb..94643eb 100644
@@ -280,7 +280,6 @@ vm_page_startup(void)
        vm_paddr_t new_end;
        int i;
        vm_paddr_t pa;
-       int nblocks;
        vm_paddr_t last_pa;
        vm_paddr_t end;
        vm_paddr_t biggestone, biggestsize;
@@ -289,26 +288,33 @@ vm_page_startup(void)
        total = 0;
        biggestsize = 0;
        biggestone = 0;
-       nblocks = 0;
        vaddr = round_page(vaddr);
 
-       for (i = 0; phys_avail[i + 1]; i += 2) {
-               phys_avail[i] = round_page64(phys_avail[i]);
-               phys_avail[i + 1] = trunc_page64(phys_avail[i + 1]);
+       /*
+        * Make sure ranges are page-aligned.
+        */
+       for (i = 0; phys_avail[i].phys_end; ++i) {
+               phys_avail[i].phys_beg = round_page64(phys_avail[i].phys_beg);
+               phys_avail[i].phys_end = trunc_page64(phys_avail[i].phys_end);
+               if (phys_avail[i].phys_end < phys_avail[i].phys_beg)
+                       phys_avail[i].phys_end = phys_avail[i].phys_beg;
        }
 
-       for (i = 0; phys_avail[i + 1]; i += 2) {
-               vm_paddr_t size = phys_avail[i + 1] - phys_avail[i];
+       /*
+        * Locate largest block
+        */
+       for (i = 0; phys_avail[i].phys_end; ++i) {
+               vm_paddr_t size = phys_avail[i].phys_end -
+                                 phys_avail[i].phys_beg;
 
                if (size > biggestsize) {
                        biggestone = i;
                        biggestsize = size;
                }
-               ++nblocks;
                total += size;
        }
 
-       end = phys_avail[biggestone+1];
+       end = phys_avail[biggestone].phys_end;
        end = trunc_page(end);
 
        /*
@@ -332,7 +338,7 @@ vm_page_startup(void)
         * minidump code.  In theory, they are not needed on i386, but are
         * included should the sf_buf code decide to use them.
         */
-       page_range = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE;
+       page_range = phys_avail[i-1].phys_end / PAGE_SIZE;
        vm_page_dump_size = round_page(roundup2(page_range, NBBY) / NBBY);
        end -= vm_page_dump_size;
        vm_page_dump = (void *)pmap_map(&vaddr, end, end + vm_page_dump_size,
@@ -344,8 +350,8 @@ vm_page_startup(void)
         * use (taking into account the overhead of a page structure per
         * page).
         */
-       first_page = phys_avail[0] / PAGE_SIZE;
-       page_range = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE - first_page;
+       first_page = phys_avail[0].phys_beg / PAGE_SIZE;
+       page_range = phys_avail[i-1].phys_end / PAGE_SIZE - first_page;
        npages = (total - (page_range * sizeof(struct vm_page))) / PAGE_SIZE;
 
 #ifndef _KERNEL_VIRTUAL
@@ -386,8 +392,11 @@ vm_page_startup(void)
         * we have to manually add these pages to the minidump tracking so
         * that they can be dumped, including the vm_page_array.
         */
-       for (pa = new_end; pa < phys_avail[biggestone + 1]; pa += PAGE_SIZE)
+       for (pa = new_end;
+            pa < phys_avail[biggestone].phys_end;
+            pa += PAGE_SIZE) {
                dump_add_page(pa);
+       }
 #endif
 
        /*
@@ -404,12 +413,12 @@ vm_page_startup(void)
         */
        vmstats.v_page_count = 0;
        vmstats.v_free_count = 0;
-       for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) {
-               pa = phys_avail[i];
+       for (i = 0; phys_avail[i].phys_end && npages > 0; ++i) {
+               pa = phys_avail[i].phys_beg;
                if (i == biggestone)
                        last_pa = new_end;
                else
-                       last_pa = phys_avail[i + 1];
+                       last_pa = phys_avail[i].phys_end;
                while (pa < last_pa && npages-- > 0) {
                        vm_add_new_page(pa);
                        pa += PAGE_SIZE;