kernel - Adjust pagezero/pagecopy assembly and re-enable VM_ALLOC_ZERO
authorMatthew Dillon <dillon@apollo.backplane.com>
Wed, 26 Oct 2011 21:48:10 +0000 (14:48 -0700)
committerMatthew Dillon <dillon@apollo.backplane.com>
Wed, 26 Oct 2011 21:48:10 +0000 (14:48 -0700)
* Remove the movnti, sfence, and prefetch instructions from the pagezero()
  and pagecopy() assembly.  They don't help and will even hurt on some of
  the less powerful cpus.

* Re-enable the use of VM_ALLOC_ZERO.  There was no difference in test
  compile times w/ the concurrent buildkernel -j 48 NO_MODULES=TRUE test.
  It might help w/ lower-load edge cases so keep it around.

sys/platform/pc64/x86_64/support.s
sys/vm/vm_fault.c

index 3c1b2e7..ddd0a38 100644 (file)
@@ -67,13 +67,13 @@ ENTRY(pagezero)
        subq    %rdx,%rdi
        xorl    %eax,%eax
 1:
-       movnti  %rax,(%rdi,%rdx)
-       movnti  %rax,8(%rdi,%rdx)
-       movnti  %rax,16(%rdi,%rdx)
-       movnti  %rax,24(%rdi,%rdx)
+       movq    %rax,(%rdi,%rdx)        /* movnti */
+       movq    %rax,8(%rdi,%rdx)       /* movnti */
+       movq    %rax,16(%rdi,%rdx)      /* movnti */
+       movq    %rax,24(%rdi,%rdx)      /* movnti */
        addq    $32,%rdx
        jne     1b
-       sfence
+       /*sfence*/
        ret
 
 ENTRY(bcmp)
@@ -172,21 +172,21 @@ ENTRY(pagecopy)
        subq    %rax,%rdi
        subq    %rax,%rsi
 1:
-       prefetchnta (%rdi,%rax)
-       addq    $64,%rax
-       jne     1b
+       /*prefetchnta (%rdi,%rax)*/
+       /*addq  $64,%rax*/
+       /*jne   1b*/
 2:
        movq    (%rdi,%rdx),%rax
-       movnti  %rax,(%rsi,%rdx)
+       movq    %rax,(%rsi,%rdx)        /* movnti */
        movq    8(%rdi,%rdx),%rax
-       movnti  %rax,8(%rsi,%rdx)
+       movq    %rax,8(%rsi,%rdx)       /* movnti */
        movq    16(%rdi,%rdx),%rax
-       movnti  %rax,16(%rsi,%rdx)
+       movq    %rax,16(%rsi,%rdx)      /* movnti */
        movq    24(%rdi,%rdx),%rax
-       movnti  %rax,24(%rsi,%rdx)
+       movq    %rax,24(%rsi,%rdx)      /* movnti */
        addq    $32,%rdx
        jne     2b
-       sfence
+       /*sfence*/
        ret
 
 /* fillw(pat, base, cnt) */  
index 522b8bf..bc12340 100644 (file)
@@ -1153,18 +1153,13 @@ vm_fault_object(struct faultstate *fs,
 
                        /*
                         * Allocate a new page for this object/offset pair.
-                        *
-                        * XXX for now don't use the VM_ALLOC_ZERO flag
-                        *     because this will continuously cycle pages
-                        *     through the cpu caches.  Try to use a recently
-                        *     freed page.
                         */
                        fs->m = NULL;
                        if (!vm_page_count_severe()) {
                                fs->m = vm_page_alloc(fs->object, pindex,
                                    ((fs->vp || fs->object->backing_object) ?
                                        VM_ALLOC_NORMAL :
-                                       VM_ALLOC_NORMAL /*| VM_ALLOC_ZERO*/));
+                                       VM_ALLOC_NORMAL | VM_ALLOC_ZERO));
                        }
                        if (fs->m == NULL) {
                                vm_object_pip_wakeup(fs->first_object);
@@ -2240,14 +2235,9 @@ vm_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry, int prot)
 
                                /*
                                 * NOTE: Allocated from base object
-                                *
-                                * XXX for now don't use the VM_ALLOC_ZERO
-                                *     flag because this will continuously
-                                *     cycle pages through the cpu caches.
-                                *     Try to use a recently freed page.
                                 */
                                m = vm_page_alloc(object, index,
-                                             VM_ALLOC_NORMAL /*| VM_ALLOC_ZERO*/);
+                                                 VM_ALLOC_NORMAL | VM_ALLOC_ZERO);
 
                                if ((m->flags & PG_ZERO) == 0) {
                                        vm_page_zero_fill(m);