kernel - Expand page count fields to 64 bits
author Matthew Dillon <dillon@apollo.backplane.com>
Mon, 4 Dec 2017 01:14:15 +0000 (17:14 -0800)
committer Matthew Dillon <dillon@apollo.backplane.com>
Mon, 4 Dec 2017 01:14:15 +0000 (17:14 -0800)
* 32-bit page count fields limit us to 8TB of RAM.  Expand to allow
  up to the DMAP limit (32TB).  Do an initial pass on various page
  count fields, changing them from int to long or vm_pindex_t.
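
  By way of arithmetic, a quick standalone sketch (not part of the
  commit; assumes LP64 and a 4KB page size):

      #include <stdio.h>

      int main(void)
      {
              long pagesz = 4096;
              long dmap = 32L << 40;          /* 32TB DMAP limit */

              /* 2^31-1 pages of 4KB is just under 8TB */
              printf("int-max bytes: %ld\n", 0x7fffffffL * pagesz);
              /* 32TB needs 2^33 pages; that does not fit in 32 bits */
              printf("32TB in pages: %ld\n", dmap / pagesz);
              return 0;
      }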

* Fix a 32-bit overflow in the pv_entry initialization code.

  pv_entry_max = shpgperproc * maxproc + vm_page_array_size;
  2000 * 1046516 + pages_of_phys_memory;

  maxproc is 1046516 @ 512GB.  This calculation overflows its signed
  32-bit variable somewhere between 256GB and 512GB of RAM (sketched
  below), which can lead to a zinitna() allocation in pvzone that is
  much too large.
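
  A minimal sketch of the overflow, using the constants cited above
  (not part of the commit):

      #include <stdio.h>

      int main(void)
      {
              int shpgperproc = 2000;         /* PMAP_SHPGPERPROC */
              int maxproc = 1046516;          /* value cited @ 512GB */
              int pages = 134217728;          /* 512GB / 4KB */

              /* the 32-bit sum wraps past 2^31-1 (signed overflow is
               * UB in ISO C; it wraps on typical two's-complement
               * targets)
               */
              int bad = shpgperproc * maxproc + pages;

              /* widening one operand keeps the math in 64 bits */
              long good = (long)shpgperproc * maxproc + pages;

              printf("32-bit: %d\n64-bit: %ld\n", bad, good);
              return 0;
      }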

Reported-by: zrj
sys/platform/pc64/x86_64/pmap.c
sys/platform/vkernel64/platform/pmap.c
sys/sys/vmmeter.h
sys/vm/vm_contig.c
sys/vm/vm_meter.c
sys/vm/vm_page.c
sys/vm/vm_page.h
sys/vm/vm_page2.h
sys/vm/vm_pageout.c

diff --git a/sys/platform/pc64/x86_64/pmap.c b/sys/platform/pc64/x86_64/pmap.c
index a12dc9e..ea8f03b 100644
@@ -194,7 +194,7 @@ static uint64_t     DMPDPphys;      /* phys addr of direct mapped level 3 */
  */
 static vm_zone_t pvzone;
 static struct vm_zone pvzone_store;
-static int pv_entry_max=0, pv_entry_high_water=0;
+static vm_pindex_t pv_entry_max=0, pv_entry_high_water=0;
 static int pmap_pagedaemon_waken = 0;
 static struct pv_entry *pvinit;
 
@@ -1233,8 +1233,8 @@ pmap_set_opt(void)
 void
 pmap_init(void)
 {
-       int i;
-       int initial_pvs;
+       vm_pindex_t initial_pvs;
+       vm_pindex_t i;
 
        /*
         * Allocate memory for random pmap data structures.  Includes the
@@ -1275,12 +1275,12 @@ pmap_init(void)
 void
 pmap_init2(void)
 {
-       int shpgperproc = PMAP_SHPGPERPROC;
-       int entry_max;
+       vm_pindex_t shpgperproc = PMAP_SHPGPERPROC;
+       vm_pindex_t entry_max;
 
-       TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
+       TUNABLE_LONG_FETCH("vm.pmap.shpgperproc", &shpgperproc);
        pv_entry_max = shpgperproc * maxproc + vm_page_array_size;
-       TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
+       TUNABLE_LONG_FETCH("vm.pmap.pv_entries", &pv_entry_max);
        pv_entry_high_water = 9 * (pv_entry_max / 10);
 
        /*
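
Matching the fetch macro to the variable's new width matters here: a
32-bit fetch into a 64-bit object stores only the low half.  A userland
analogy of the hazard (not the kernel's TUNABLE_* implementation;
assumes little-endian LP64):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            long v = -1;            /* pretend stale bits from earlier use */
            int fetched = 42;       /* what a 32-bit fetch would deliver */

            /* wrong-width store: only 4 of v's 8 bytes are written */
            memcpy(&v, &fetched, sizeof(fetched));
            printf("mismatched: %ld\n", v);         /* not 42 on LP64 */

            v = fetched;                            /* proper widening */
            printf("widened:    %ld\n", v);         /* 42 */
            return 0;
    }
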
diff --git a/sys/platform/vkernel64/platform/pmap.c b/sys/platform/vkernel64/platform/pmap.c
index 52fa200..92c97c2 100644
@@ -148,9 +148,9 @@ extern void *vkernel_stack;
  */
 static vm_zone_t pvzone;
 static struct vm_zone pvzone_store;
-static int pv_entry_count = 0;
-static int pv_entry_max = 0;
-static int pv_entry_high_water = 0;
+static vm_pindex_t pv_entry_count = 0;
+static vm_pindex_t pv_entry_max = 0;
+static vm_pindex_t pv_entry_high_water = 0;
 static int pmap_pagedaemon_waken = 0;
 static struct pv_entry *pvinit;
 
@@ -633,8 +633,8 @@ pmap_bootstrap(vm_paddr_t *firstaddr, int64_t ptov_offset)
 void
 pmap_init(void)
 {
-       int i;
-       int initial_pvs;
+       vm_pindex_t i;
+       vm_pindex_t initial_pvs;
 
        /*
         * object for kernel page table pages
@@ -647,7 +647,7 @@ pmap_init(void)
         * Allocate memory for random pmap data structures.  Includes the
         * pv_head_table.
         */
-       for(i = 0; i < vm_page_array_size; i++) {
+       for (i = 0; i < vm_page_array_size; i++) {
                vm_page_t m;
 
                m = &vm_page_array[i];
@@ -683,11 +683,11 @@ pmap_init(void)
 void
 pmap_init2(void)
 {
-       int shpgperproc = PMAP_SHPGPERPROC;
+       vm_pindex_t shpgperproc = PMAP_SHPGPERPROC;
 
-       TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
+       TUNABLE_LONG_FETCH("vm.pmap.shpgperproc", &shpgperproc);
        pv_entry_max = shpgperproc * maxproc + vm_page_array_size;
-       TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
+       TUNABLE_LONG_FETCH("vm.pmap.pv_entries", &pv_entry_max);
        pv_entry_high_water = 9 * (pv_entry_max / 10);
        zinitna(pvzone, NULL, 0, pv_entry_max, ZONE_INTERRUPT);
 }
@@ -1788,8 +1788,7 @@ cpu_vmspace_free(struct vmspace *vm)
 static __inline void
 free_pv_entry(pv_entry_t pv)
 {
-       atomic_add_int(&pv_entry_count, -1);
-       KKASSERT(pv_entry_count >= 0);
+       atomic_add_long(&pv_entry_count, -1);
        zfree(pvzone, pv);
 }
 
@@ -1800,7 +1799,7 @@ free_pv_entry(pv_entry_t pv)
 static pv_entry_t
 get_pv_entry(void)
 {
-       atomic_add_int(&pv_entry_count, 1);
+       atomic_add_long(&pv_entry_count, 1);
        if (pv_entry_high_water &&
            (pv_entry_count > pv_entry_high_water) &&
            atomic_swap_int(&pmap_pagedaemon_waken, 1) == 0) {
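
The KKASSERT(pv_entry_count >= 0) disappears above because
pv_entry_count is now an unsigned vm_pindex_t, for which the test is
vacuously true.  A quick sketch of why such an assertion loses its
meaning (illustrative only; compilers typically warn that the
comparison is always true):

    #include <stdio.h>

    int main(void)
    {
            unsigned long count = 0;

            count -= 1;             /* underflow wraps to ULONG_MAX */
            if (count >= 0)         /* always true for an unsigned type */
                    printf("the old assert could never fire: %lu\n", count);
            return 0;
    }
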
diff --git a/sys/sys/vmmeter.h b/sys/sys/vmmeter.h
index 867f982..85d066a 100644
@@ -120,29 +120,30 @@ struct vmstats {
         * Distribution of page usages.
         */
        u_int v_page_size;      /* page size in bytes */
-       int v_page_count;       /* total number of pages in system */
-       int v_free_reserved;    /* number of pages reserved for deadlock */
-       int v_free_target;      /* number of pages desired free */
-       int v_free_min;         /* minimum number of pages desired free */
-
-       int v_cache_min;        /* min number of pages desired on cache queue */
-       int v_cache_max;        /* max number of pages in cached obj */
-       int v_pageout_free_min; /* min number pages reserved for kernel */
-       int v_interrupt_free_min; /* reserved number of pages for int code */
-       int v_free_severe;      /* severe depletion of pages below this pt */
-       int v_dma_pages;        /* total dma-reserved pages */
-
-       int v_unused_fixed[5];
-
-       int v_free_count;       /* number of pages free */
-       int v_wire_count;       /* number of pages wired down */
-       int v_active_count;     /* number of pages active */
-       int v_inactive_target;  /* number of pages desired inactive */
-       int v_inactive_count;   /* number of pages inactive */
-       int v_cache_count;      /* number of pages on buffer cache queue */
-       int v_dma_avail;        /* free dma-reserved pages */
-
-       int v_unused_variable[9];
+       u_int v_unused01;
+       long v_page_count;      /* total number of pages in system */
+       long v_free_reserved;   /* number of pages reserved for deadlock */
+       long v_free_target;     /* number of pages desired free */
+       long v_free_min;        /* minimum number of pages desired free */
+
+       long v_cache_min;       /* min number of pages desired on cache queue */
+       long v_cache_max;       /* max number of pages in cached obj */
+       long v_pageout_free_min; /* min number pages reserved for kernel */
+       long v_interrupt_free_min; /* reserved number of pages for int code */
+       long v_free_severe;     /* severe depletion of pages below this pt */
+       long v_dma_pages;       /* total dma-reserved pages */
+
+       long v_unused_fixed[5];
+
+       long v_free_count;      /* number of pages free */
+       long v_wire_count;      /* number of pages wired down */
+       long v_active_count;    /* number of pages active */
+       long v_inactive_target; /* number of pages desired inactive */
+       long v_inactive_count;  /* number of pages inactive */
+       long v_cache_count;     /* number of pages on buffer cache queue */
+       long v_dma_avail;       /* free dma-reserved pages */
+
+       long v_unused_variable[9];
 };
 
 #define VMMETER_SLOP_COUNT     128
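
Note the new u_int v_unused01: it explicitly pads the lone u_int
v_page_size to 8 bytes so the long fields that follow start naturally
aligned on LP64.  The compiler would insert the same padding
implicitly; spelling it out keeps the layout unambiguous.  A reduced
mock (field names mirror the struct above; compiles standalone under
C11):

    #include <assert.h>
    #include <stddef.h>

    struct vmstats_mock {
            unsigned int v_page_size;
            unsigned int v_unused01;        /* explicit pad, no hidden hole */
            long v_page_count;
            long v_free_reserved;
    };

    static_assert(offsetof(struct vmstats_mock, v_page_count) == 8,
        "long fields start 8-byte aligned");

    int main(void) { return 0; }
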
diff --git a/sys/vm/vm_contig.c b/sys/vm/vm_contig.c
index 3cdd2d1..6640448 100644
 #include <sys/spinlock2.h>
 #include <vm/vm_page2.h>
 
-static void vm_contig_pg_free(int start, u_long size);
+static void vm_contig_pg_free(vm_pindex_t start, u_long size);
 
 /*
  * vm_contig_pg_clean:
@@ -140,7 +140,7 @@ static void vm_contig_pg_free(int start, u_long size);
  *     pageout (daemon) flush routine is invoked.
  */
 static void
-vm_contig_pg_clean(int queue, int count)
+vm_contig_pg_clean(int queue, vm_pindex_t count)
 {
        vm_object_t object;
        vm_page_t m, m_tmp;
@@ -165,7 +165,8 @@ vm_contig_pg_clean(int queue, int count)
         * acquired before the pageq spinlock so it's easiest to simply
         * not hold it in the loop iteration.
         */
-       while (count-- > 0 && (m = TAILQ_NEXT(&marker, pageq)) != NULL) {
+       while ((long)count-- > 0 &&
+              (m = TAILQ_NEXT(&marker, pageq)) != NULL) {
                vm_page_and_queue_spin_lock(m);
                if (m != TAILQ_NEXT(&marker, pageq)) {
                        vm_page_and_queue_spin_unlock(m);
@@ -245,14 +246,15 @@ vm_contig_pg_clean(int queue, int count)
  * Malloc()'s data structures have been used for collection of
  * statistics and for allocations of less than a page.
  */
-static int
+static vm_pindex_t
 vm_contig_pg_alloc(unsigned long size, vm_paddr_t low, vm_paddr_t high,
                   unsigned long alignment, unsigned long boundary, int mflags)
 {
-       int i, q, start, pass;
+       vm_pindex_t i, q, start;
        vm_offset_t phys;
        vm_page_t pga = vm_page_array;
        vm_page_t m;
+       int pass;
        int pqtype;
 
        size = round_page(size);
@@ -432,7 +434,7 @@ again:
        /*
         * Failed.
         */
-       return (-1);
+       return ((vm_pindex_t)-1);
 }
 
 /*
@@ -446,7 +448,7 @@ again:
  * No other requirements.
  */
 static void
-vm_contig_pg_free(int start, u_long size)
+vm_contig_pg_free(vm_pindex_t start, u_long size)
 {
        vm_page_t pga = vm_page_array;
        
@@ -473,7 +475,7 @@ vm_contig_pg_free(int start, u_long size)
  * No requirements.
  */
 static vm_offset_t
-vm_contig_pg_kmap(int start, u_long size, vm_map_t map, int flags)
+vm_contig_pg_kmap(vm_pindex_t start, u_long size, vm_map_t map, int flags)
 {
        vm_offset_t addr;
        vm_paddr_t pa;
@@ -521,11 +523,11 @@ contigmalloc_map(unsigned long size, struct malloc_type *type,
                 unsigned long alignment, unsigned long boundary,
                 vm_map_t map)
 {
-       int index;
+       vm_pindex_t index;
        void *rv;
 
        index = vm_contig_pg_alloc(size, low, high, alignment, boundary, flags);
-       if (index < 0) {
+       if (index == (vm_pindex_t)-1) {
                kprintf("contigmalloc_map: failed size %lu low=%llx "
                        "high=%llx align=%lu boundary=%lu flags=%08x\n",
                        size, (long long)low, (long long)high,
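
With vm_contig_pg_alloc() returning the unsigned vm_pindex_t, the old
(index < 0) failure test would be always false, so failure is now
signaled and tested as (vm_pindex_t)-1.  The pattern in a standalone
sketch (pindex_t is a stand-in type, not the kernel's):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t pindex_t;              /* stand-in for vm_pindex_t */

    static pindex_t pg_alloc(int fail)
    {
            return fail ? (pindex_t)-1 : 42;        /* all-ones = failure */
    }

    int main(void)
    {
            pindex_t idx = pg_alloc(1);

            /* (idx < 0) is always false for an unsigned type;
             * compare against the sentinel instead
             */
            if (idx == (pindex_t)-1)
                    printf("allocation failed\n");
            return 0;
    }
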
diff --git a/sys/vm/vm_meter.c b/sys/vm/vm_meter.c
index e4670b9..031b63e 100644
@@ -67,28 +67,28 @@ __cachealign struct vmstats vmstats;
 
 static int maxslp = MAXSLP;
 
-SYSCTL_UINT(_vm, VM_V_FREE_MIN, v_free_min,
+SYSCTL_ULONG(_vm, VM_V_FREE_MIN, v_free_min,
        CTLFLAG_RW, &vmstats.v_free_min, 0,
        "Minimum number of pages desired free");
-SYSCTL_UINT(_vm, VM_V_FREE_TARGET, v_free_target,
+SYSCTL_ULONG(_vm, VM_V_FREE_TARGET, v_free_target,
        CTLFLAG_RW, &vmstats.v_free_target, 0,
        "Number of pages desired free");
-SYSCTL_UINT(_vm, VM_V_FREE_RESERVED, v_free_reserved,
+SYSCTL_ULONG(_vm, VM_V_FREE_RESERVED, v_free_reserved,
        CTLFLAG_RW, &vmstats.v_free_reserved, 0,
        "Number of pages reserved for deadlock");
-SYSCTL_UINT(_vm, VM_V_INACTIVE_TARGET, v_inactive_target,
+SYSCTL_ULONG(_vm, VM_V_INACTIVE_TARGET, v_inactive_target,
        CTLFLAG_RW, &vmstats.v_inactive_target, 0,
        "Number of pages desired inactive");
-SYSCTL_UINT(_vm, VM_V_CACHE_MIN, v_cache_min,
+SYSCTL_ULONG(_vm, VM_V_CACHE_MIN, v_cache_min,
        CTLFLAG_RW, &vmstats.v_cache_min, 0,
        "Min number of pages desired on cache queue");
-SYSCTL_UINT(_vm, VM_V_CACHE_MAX, v_cache_max,
+SYSCTL_ULONG(_vm, VM_V_CACHE_MAX, v_cache_max,
        CTLFLAG_RW, &vmstats.v_cache_max, 0,
        "Max number of pages in cached obj");
-SYSCTL_UINT(_vm, VM_V_PAGEOUT_FREE_MIN, v_pageout_free_min,
+SYSCTL_ULONG(_vm, VM_V_PAGEOUT_FREE_MIN, v_pageout_free_min,
        CTLFLAG_RW, &vmstats.v_pageout_free_min, 0,
        "Min number pages reserved for kernel");
-SYSCTL_UINT(_vm, OID_AUTO, v_free_severe,
+SYSCTL_ULONG(_vm, OID_AUTO, v_free_severe,
        CTLFLAG_RW, &vmstats.v_free_severe, 0, "");
 
 SYSCTL_STRUCT(_vm, VM_LOADAVG, loadavg,
@@ -379,46 +379,46 @@ SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_kthreadpages, CTLTYPE_UINT|CTLFLAG_RD,
 SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
        v_page_size, CTLFLAG_RD, &vmstats.v_page_size, 0,
        "Page size in bytes");
-SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
+SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
        v_page_count, CTLFLAG_RD, &vmstats.v_page_count, 0, 
        "Total number of pages in system");
-SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
+SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
        v_free_reserved, CTLFLAG_RD, &vmstats.v_free_reserved, 0,
        "Number of pages reserved for deadlock");
-SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
+SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
        v_free_target, CTLFLAG_RD, &vmstats.v_free_target, 0,
        "Number of pages desired free");
-SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
+SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
        v_free_min, CTLFLAG_RD, &vmstats.v_free_min, 0,
        "Minimum number of pages desired free");
-SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
+SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
        v_free_count, CTLFLAG_RD, &vmstats.v_free_count, 0,
        "Number of pages free");
-SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
+SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
        v_wire_count, CTLFLAG_RD, &vmstats.v_wire_count, 0,
        "Number of pages wired down");
-SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
+SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
        v_active_count, CTLFLAG_RD, &vmstats.v_active_count, 0,
        "Number of pages active");
-SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
+SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
        v_inactive_target, CTLFLAG_RD, &vmstats.v_inactive_target, 0,
        "Number of pages desired inactive");
-SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
+SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
        v_inactive_count, CTLFLAG_RD, &vmstats.v_inactive_count, 0,
        "Number of pages inactive");
-SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
+SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
        v_cache_count, CTLFLAG_RD, &vmstats.v_cache_count, 0,
        "Number of pages on buffer cache queue");
-SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
+SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
        v_cache_min, CTLFLAG_RD, &vmstats.v_cache_min, 0,
        "Min number of pages desired on cache queue");
-SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
+SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
        v_cache_max, CTLFLAG_RD, &vmstats.v_cache_max, 0,
        "Max number of pages in cached obj");
-SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
+SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
        v_pageout_free_min, CTLFLAG_RD, &vmstats.v_pageout_free_min, 0,
        "Min number pages reserved for kernel");
-SYSCTL_UINT(_vm_stats_vm, OID_AUTO,
+SYSCTL_ULONG(_vm_stats_vm, OID_AUTO,
        v_interrupt_free_min, CTLFLAG_RD, &vmstats.v_interrupt_free_min, 0,
        "Reserved number of pages for int code");
 
@@ -597,26 +597,26 @@ vmstats_rollup(void)
 void
 vmstats_rollup_cpu(globaldata_t gd)
 {
-       int value;
+       long value;
 
        if (gd->gd_vmstats_adj.v_free_count) {
-               value = atomic_swap_int(&gd->gd_vmstats_adj.v_free_count, 0);
-               atomic_add_int(&vmstats.v_free_count, value);
+               value = atomic_swap_long(&gd->gd_vmstats_adj.v_free_count, 0);
+               atomic_add_long(&vmstats.v_free_count, value);
        }
        if (gd->gd_vmstats_adj.v_cache_count) {
-               value = atomic_swap_int(&gd->gd_vmstats_adj.v_cache_count, 0);
-               atomic_add_int(&vmstats.v_cache_count, value);
+               value = atomic_swap_long(&gd->gd_vmstats_adj.v_cache_count, 0);
+               atomic_add_long(&vmstats.v_cache_count, value);
        }
        if (gd->gd_vmstats_adj.v_inactive_count) {
-               value =atomic_swap_int(&gd->gd_vmstats_adj.v_inactive_count, 0);
-               atomic_add_int(&vmstats.v_inactive_count, value);
+               value=atomic_swap_long(&gd->gd_vmstats_adj.v_inactive_count, 0);
+               atomic_add_long(&vmstats.v_inactive_count, value);
        }
        if (gd->gd_vmstats_adj.v_active_count) {
-               value = atomic_swap_int(&gd->gd_vmstats_adj.v_active_count, 0);
-               atomic_add_int(&vmstats.v_active_count, value);
+               value = atomic_swap_long(&gd->gd_vmstats_adj.v_active_count, 0);
+               atomic_add_long(&vmstats.v_active_count, value);
        }
        if (gd->gd_vmstats_adj.v_wire_count) {
-               value = atomic_swap_int(&gd->gd_vmstats_adj.v_wire_count, 0);
-               atomic_add_int(&vmstats.v_wire_count, value);
+               value = atomic_swap_long(&gd->gd_vmstats_adj.v_wire_count, 0);
+               atomic_add_long(&vmstats.v_wire_count, value);
        }
 }
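
vmstats_rollup_cpu() drains each per-CPU adjustment into the global
counter by atomically swapping the delta to zero and adding back what
was taken.  A userland sketch of the same pattern with C11 atomics
(the kernel uses its own atomic_swap_long()/atomic_add_long()
primitives):

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic long percpu_adj;         /* one per-cpu delta cell */
    static _Atomic long global_count;       /* the global counter */

    static void rollup(void)
    {
            /* atomically take the whole delta and zero it, then fold
             * it into the global counter; no update is lost even if
             * another thread adds to percpu_adj concurrently
             */
            long value = atomic_exchange(&percpu_adj, 0);

            if (value != 0)
                    atomic_fetch_add(&global_count, value);
    }

    int main(void)
    {
            atomic_fetch_add(&percpu_adj, 7);
            rollup();
            printf("%ld\n", atomic_load(&global_count));    /* 7 */
            return 0;
    }
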
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 9fbcc54..455523b 100644
@@ -169,8 +169,8 @@ vm_page_queue_init(void)
 /*
  * note: place in initialized data section?  Is this necessary?
  */
-long first_page = 0;
-int vm_page_array_size = 0;
+vm_pindex_t first_page = 0;
+vm_pindex_t vm_page_array_size = 0;
 vm_page_t vm_page_array = NULL;
 vm_paddr_t vm_low_phys_reserved;
 
@@ -227,11 +227,11 @@ vm_add_new_page(vm_paddr_t pa)
         * contigmalloc() to use.
         */
        if (pa < vm_low_phys_reserved) {
-               atomic_add_int(&vmstats.v_page_count, 1);
-               atomic_add_int(&vmstats.v_dma_pages, 1);
+               atomic_add_long(&vmstats.v_page_count, 1);
+               atomic_add_long(&vmstats.v_dma_pages, 1);
                m->queue = PQ_NONE;
                m->wire_count = 1;
-               atomic_add_int(&vmstats.v_wire_count, 1);
+               atomic_add_long(&vmstats.v_wire_count, 1);
                alist_free(&vm_contig_alist, pa >> PAGE_SHIFT, 1);
                return;
        }
@@ -242,8 +242,8 @@ vm_add_new_page(vm_paddr_t pa)
        m->queue = m->pc + PQ_FREE;
        KKASSERT(m->dirty == 0);
 
-       atomic_add_int(&vmstats.v_page_count, 1);
-       atomic_add_int(&vmstats.v_free_count, 1);
+       atomic_add_long(&vmstats.v_page_count, 1);
+       atomic_add_long(&vmstats.v_free_count, 1);
        vpq = &vm_page_queues[m->queue];
        TAILQ_INSERT_HEAD(&vpq->pl, m, pageq);
        ++vpq->lcnt;
@@ -269,7 +269,7 @@ vm_page_startup(void)
 {
        vm_offset_t vaddr = virtual2_start ? virtual2_start : virtual_start;
        vm_offset_t mapped;
-       vm_size_t npages;
+       vm_pindex_t npages;
        vm_paddr_t page_range;
        vm_paddr_t new_end;
        int i;
@@ -592,7 +592,7 @@ vm_page_startup_finish(void *dummy __unused)
                                blk, count, rblk);
                        break;
                }
-               atomic_add_int(&vmstats.v_dma_pages, -count);
+               atomic_add_long(&vmstats.v_dma_pages, -count);
                spin_unlock(&vm_contig_spin);
 
                m = PHYS_TO_VM_PAGE((vm_paddr_t)blk << PAGE_SHIFT);
@@ -775,7 +775,7 @@ _vm_page_rem_queue_spinlocked(vm_page_t m)
        struct vpgqueues *pq;
        u_short queue;
        u_short oqueue;
-       int *cnt;
+       long *cnt;
 
        queue = m->queue;
        if (queue != PQ_NONE) {
@@ -793,15 +793,15 @@ _vm_page_rem_queue_spinlocked(vm_page_t m)
                 * mastership changes in the global vmstats, which can be
                 * particularly bad in multi-socket systems.
                 */
-               cnt = (int *)((char *)&mycpu->gd_vmstats_adj + pq->cnt_offset);
-               atomic_add_int(cnt, -1);
+               cnt = (long *)((char *)&mycpu->gd_vmstats_adj + pq->cnt_offset);
+               atomic_add_long(cnt, -1);
                if (*cnt < -VMMETER_SLOP_COUNT) {
-                       u_int copy = atomic_swap_int(cnt, 0);
-                       cnt = (int *)((char *)&vmstats + pq->cnt_offset);
-                       atomic_add_int(cnt, copy);
-                       cnt = (int *)((char *)&mycpu->gd_vmstats +
+                       u_long copy = atomic_swap_long(cnt, 0);
+                       cnt = (long *)((char *)&vmstats + pq->cnt_offset);
+                       atomic_add_long(cnt, copy);
+                       cnt = (long *)((char *)&mycpu->gd_vmstats +
                                      pq->cnt_offset);
-                       atomic_add_int(cnt, copy);
+                       atomic_add_long(cnt, copy);
                }
                pq->lcnt--;
                m->queue = PQ_NONE;
@@ -825,7 +825,7 @@ static __inline void
 _vm_page_add_queue_spinlocked(vm_page_t m, u_short queue, int athead)
 {
        struct vpgqueues *pq;
-       u_int *cnt;
+       u_long *cnt;
 
        KKASSERT(m->queue == PQ_NONE);
 
@@ -839,8 +839,8 @@ _vm_page_add_queue_spinlocked(vm_page_t m, u_short queue, int athead)
                 * to incorporate the count it will call vmstats_rollup()
                 * to roll it all up into the global vmstats strufture.
                 */
-               cnt = (int *)((char *)&mycpu->gd_vmstats_adj + pq->cnt_offset);
-               atomic_add_int(cnt, 1);
+               cnt = (long *)((char *)&mycpu->gd_vmstats_adj + pq->cnt_offset);
+               atomic_add_long(cnt, 1);
 
                /*
                 * PQ_FREE is always handled LIFO style to try to provide
@@ -1965,7 +1965,7 @@ vm_page_alloc_contig(vm_paddr_t low, vm_paddr_t high,
 {
        alist_blk_t blk;
        vm_page_t m;
-       int i;
+       vm_pindex_t i;
 
        alignment >>= PAGE_SHIFT;
        if (alignment == 0)
@@ -2004,9 +2004,10 @@ vm_page_alloc_contig(vm_paddr_t low, vm_paddr_t high,
        }
 
        m = PHYS_TO_VM_PAGE((vm_paddr_t)blk << PAGE_SHIFT);
-       if (memattr != VM_MEMATTR_DEFAULT)
-               for (i = 0;i < size;i++)
+       if (memattr != VM_MEMATTR_DEFAULT) {
+               for (i = 0;i < size; i++)
                        pmap_page_set_memattr(&m[i], memattr);
+       }
        return m;
 }
 
@@ -2397,7 +2398,7 @@ vm_page_wire(vm_page_t m)
                if (atomic_fetchadd_int(&m->wire_count, 1) == 0) {
                        if ((m->flags & PG_UNMANAGED) == 0)
                                vm_page_unqueue(m);
-                       atomic_add_int(&mycpu->gd_vmstats_adj.v_wire_count, 1);
+                       atomic_add_long(&mycpu->gd_vmstats_adj.v_wire_count, 1);
                }
                KASSERT(m->wire_count != 0,
                        ("vm_page_wire: wire_count overflow m=%p", m));
@@ -2442,7 +2443,7 @@ vm_page_unwire(vm_page_t m, int activate)
                panic("vm_page_unwire: invalid wire count: %d", m->wire_count);
        } else {
                if (atomic_fetchadd_int(&m->wire_count, -1) == 1) {
-                       atomic_add_int(&mycpu->gd_vmstats_adj.v_wire_count, -1);
+                       atomic_add_long(&mycpu->gd_vmstats_adj.v_wire_count,-1);
                        if (m->flags & PG_UNMANAGED) {
                                ;
                        } else if (activate || (m->flags & PG_NEED_COMMIT)) {
@@ -3223,16 +3224,17 @@ vm_page_test_dirty(vm_page_t m)
 
 DB_SHOW_COMMAND(page, vm_page_print_page_info)
 {
-       db_printf("vmstats.v_free_count: %d\n", vmstats.v_free_count);
-       db_printf("vmstats.v_cache_count: %d\n", vmstats.v_cache_count);
-       db_printf("vmstats.v_inactive_count: %d\n", vmstats.v_inactive_count);
-       db_printf("vmstats.v_active_count: %d\n", vmstats.v_active_count);
-       db_printf("vmstats.v_wire_count: %d\n", vmstats.v_wire_count);
-       db_printf("vmstats.v_free_reserved: %d\n", vmstats.v_free_reserved);
-       db_printf("vmstats.v_free_min: %d\n", vmstats.v_free_min);
-       db_printf("vmstats.v_free_target: %d\n", vmstats.v_free_target);
-       db_printf("vmstats.v_cache_min: %d\n", vmstats.v_cache_min);
-       db_printf("vmstats.v_inactive_target: %d\n", vmstats.v_inactive_target);
+       db_printf("vmstats.v_free_count: %ld\n", vmstats.v_free_count);
+       db_printf("vmstats.v_cache_count: %ld\n", vmstats.v_cache_count);
+       db_printf("vmstats.v_inactive_count: %ld\n", vmstats.v_inactive_count);
+       db_printf("vmstats.v_active_count: %ld\n", vmstats.v_active_count);
+       db_printf("vmstats.v_wire_count: %ld\n", vmstats.v_wire_count);
+       db_printf("vmstats.v_free_reserved: %ld\n", vmstats.v_free_reserved);
+       db_printf("vmstats.v_free_min: %ld\n", vmstats.v_free_min);
+       db_printf("vmstats.v_free_target: %ld\n", vmstats.v_free_target);
+       db_printf("vmstats.v_cache_min: %ld\n", vmstats.v_cache_min);
+       db_printf("vmstats.v_inactive_target: %ld\n",
+                 vmstats.v_inactive_target);
 }
 
 DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index d1c44fc..91f58a6 100644
@@ -302,8 +302,8 @@ extern struct vpgqueues vm_page_queues[PQ_COUNT];
  */
 
 extern struct vm_page *vm_page_array;  /* First resident page in table */
-extern int vm_page_array_size;         /* number of vm_page_t's */
-extern long first_page;                        /* first physical page number */
+extern vm_pindex_t vm_page_array_size; /* number of vm_page_t's */
+extern vm_pindex_t first_page;         /* first physical page number */
 
 #define VM_PAGE_TO_PHYS(entry) \
                ((entry)->phys_addr)
diff --git a/sys/vm/vm_page2.h b/sys/vm/vm_page2.h
index 557d8a7..c1d057f 100644
@@ -94,7 +94,7 @@ vm_page_count_severe(void)
  */
 static __inline 
 int
-vm_page_count_min(int donotcount)
+vm_page_count_min(long donotcount)
 {
     globaldata_t gd = mycpu;
 
@@ -134,7 +134,7 @@ vm_page_count_target(void)
  * This function DOES NOT return TRUE or FALSE.
  */
 static __inline 
-int
+long
 vm_paging_target(void)
 {
     globaldata_t gd = mycpu;
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 945da18..4d6126b 100644
  */
 
 /* the kernel process "vm_pageout"*/
-static int vm_pageout_page(vm_page_t m, int *max_launderp,
-                          int *vnodes_skippedp, struct vnode **vpfailedp,
+static int vm_pageout_page(vm_page_t m, long *max_launderp,
+                          long *vnodes_skippedp, struct vnode **vpfailedp,
                           int pass, int vmflush_flags);
 static int vm_pageout_clean_helper (vm_page_t, int);
 static int vm_pageout_free_page_calc (vm_size_t count);
@@ -228,10 +228,10 @@ static void vm_pageout_page_stats(int q);
  * So what we do is calculate a value that can be satisfied nominally by
  * only having to scan half the queues.
  */
-static __inline int
-PQAVERAGE(int n)
+static __inline long
+PQAVERAGE(long n)
 {
-       int avg;
+       long avg;
 
        if (n >= 0) {
                avg = ((n + (PQ_L2_SIZE - 1)) / (PQ_L2_SIZE / 2) + 1);
@@ -620,8 +620,8 @@ vm_pageout_mdp_callback(struct pmap_pgscan_info *info, vm_offset_t va,
         * and improves write performance.
         */
        if (cleanit) {
-               int max_launder = 0x7FFF;
-               int vnodes_skipped = 0;
+               long max_launder = 0x7FFF;
+               long vnodes_skipped = 0;
                int vmflush_flags;
                struct vnode *vpfailed = NULL;
 
@@ -737,15 +737,15 @@ static int vm_pageout_scan_callback(struct proc *p, void *data);
  * WARNING! Can be called from two pagedaemon threads simultaneously.
  */
 static int
-vm_pageout_scan_inactive(int pass, int q, int avail_shortage,
-                        int *vnodes_skipped)
+vm_pageout_scan_inactive(int pass, int q, long avail_shortage,
+                        long *vnodes_skipped)
 {
        vm_page_t m;
        struct vm_page marker;
        struct vnode *vpfailed;         /* warning, allowed to be stale */
        int maxscan;
-       int delta = 0;
-       int max_launder;
+       long delta = 0;
+       long max_launder;
        int isep;
 
        isep = (curthread == emergpager);
@@ -933,7 +933,7 @@ vm_pageout_scan_inactive(int pass, int q, int avail_shortage,
  * of by this function.
  */
 static int
-vm_pageout_page(vm_page_t m, int *max_launderp, int *vnodes_skippedp,
+vm_pageout_page(vm_page_t m, long *max_launderp, long *vnodes_skippedp,
                struct vnode **vpfailedp, int pass, int vmflush_flags)
 {
        vm_object_t object;
@@ -1265,14 +1265,14 @@ vm_pageout_page(vm_page_t m, int *max_launderp, int *vnodes_skippedp,
  */
 static int
 vm_pageout_scan_active(int pass, int q,
-                      int avail_shortage, int inactive_shortage,
-                      int *recycle_countp)
+                      long avail_shortage, long inactive_shortage,
+                      long *recycle_countp)
 {
        struct vm_page marker;
        vm_page_t m;
        int actcount;
-       int delta = 0;
-       int maxscan;
+       long delta = 0;
+       long maxscan;
        int isep;
 
        isep = (curthread == emergpager);
@@ -1533,8 +1533,8 @@ next:
  * WARNING! Can be called from two pagedaemon threads simultaneously.
  */
 static void
-vm_pageout_scan_cache(int avail_shortage, int pass,
-                     int vnodes_skipped, int recycle_count)
+vm_pageout_scan_cache(long avail_shortage, int pass,
+                     long vnodes_skipped, long recycle_count)
 {
        static int lastkillticks;
        struct vm_pageout_scan_info info;
@@ -1642,10 +1642,11 @@ next_rover:
            isep == 0 &&
            (vm_page_count_min(recycle_count) || avail_shortage > 0)) {
                kprintf("Warning: system low on memory+swap "
-                       "shortage %d for %d ticks!\n",
+                       "shortage %ld for %d ticks!\n",
                        avail_shortage, ticks - swap_fail_ticks);
                if (bootverbose)
-               kprintf("Metrics: spaf=%d spf=%d pass=%d avail=%d target=%d last=%u\n",
+               kprintf("Metrics: spaf=%d spf=%d pass=%d "
+                       "avail=%ld target=%ld last=%u\n",
                        swap_pager_almost_full,
                        swap_pager_full,
                        pass,
@@ -1745,8 +1746,8 @@ vm_pageout_page_stats(int q)
        static int fullintervalcount = 0;
        struct vm_page marker;
        vm_page_t m;
-       int pcount, tpcount;            /* Number of pages to check */
-       int page_shortage;
+       long pcount, tpcount;           /* Number of pages to check */
+       long page_shortage;
 
        page_shortage = (vmstats.v_inactive_target + vmstats.v_cache_max +
                         vmstats.v_free_min) -
@@ -2069,11 +2070,11 @@ skip_setup:
         */
        while (TRUE) {
                int error;
-               int avail_shortage;
-               int inactive_shortage;
-               int vnodes_skipped = 0;
-               int recycle_count = 0;
-               int tmp;
+               long avail_shortage;
+               long inactive_shortage;
+               long vnodes_skipped = 0;
+               long recycle_count = 0;
+               long tmp;
 
                /*
                 * Wait for an action request.  If we timeout check to
@@ -2161,7 +2162,7 @@ skip_setup:
                vm_pageout_deficit = 0;
 
                if (avail_shortage > 0) {
-                       int delta = 0;
+                       long delta = 0;
                        int qq;
 
                        qq = q1iterator;
@@ -2236,7 +2237,7 @@ skip_setup:
                        inactive_shortage = vm_emerg_launder;
 
                if (/*avail_shortage > 0 ||*/ inactive_shortage > 0) {
-                       int delta = 0;
+                       long delta = 0;
                        int qq;
 
                        qq = q2iterator;