vkernel64: Additional adjustments (amd64 -> x86_64, recent commits etc.).
author Sascha Wildner <saw@online.de>
Sun, 21 Mar 2010 09:11:10 +0000 (10:11 +0100)
committer Sascha Wildner <saw@online.de>
Sun, 21 Mar 2010 09:11:10 +0000 (10:11 +0100)
38 files changed:
include/strings.h
sys/conf/files
sys/config/VKERNEL64
sys/kern/kern_shutdown.c
sys/net/altq/altq_subr.c
sys/platform/pc64/conf/files
sys/platform/vkernel64/Makefile.inc [new file with mode: 0644]
sys/platform/vkernel64/amd64/mplock.s [deleted file]
sys/platform/vkernel64/conf/files
sys/platform/vkernel64/conf/ldscript.x86_64 [moved from sys/platform/vkernel64/conf/ldscript.amd64 with 100% similarity]
sys/platform/vkernel64/include/globaldata.h
sys/platform/vkernel64/include/lock.h
sys/platform/vkernel64/include/pcb_ext.h
sys/platform/vkernel64/include/proc.h
sys/platform/vkernel64/include/types.h [copied from sys/platform/vkernel64/include/proc.h with 80% similarity]
sys/platform/vkernel64/platform/console.c
sys/platform/vkernel64/platform/copyio.c
sys/platform/vkernel64/platform/cothread.c
sys/platform/vkernel64/platform/init.c
sys/platform/vkernel64/platform/pmap.c
sys/platform/vkernel64/x86_64/autoconf.c [moved from sys/platform/vkernel64/amd64/autoconf.c with 99% similarity]
sys/platform/vkernel64/x86_64/cpu_regs.c [moved from sys/platform/vkernel64/amd64/cpu_regs.c with 99% similarity]
sys/platform/vkernel64/x86_64/db_interface.c [moved from sys/platform/vkernel64/amd64/db_interface.c with 99% similarity]
sys/platform/vkernel64/x86_64/db_trace.c [moved from sys/platform/vkernel64/amd64/db_trace.c with 100% similarity]
sys/platform/vkernel64/x86_64/exception.c [moved from sys/platform/vkernel64/amd64/exception.c with 100% similarity]
sys/platform/vkernel64/x86_64/fork_tramp.s [moved from sys/platform/vkernel64/amd64/fork_tramp.s with 100% similarity]
sys/platform/vkernel64/x86_64/genassym.c [moved from sys/platform/vkernel64/amd64/genassym.c with 98% similarity]
sys/platform/vkernel64/x86_64/global.s [moved from sys/platform/vkernel64/amd64/global.s with 100% similarity]
sys/platform/vkernel64/x86_64/locore.s [moved from sys/platform/vkernel64/amd64/locore.s with 100% similarity]
sys/platform/vkernel64/x86_64/mp.c [moved from sys/platform/vkernel64/amd64/mp.c with 99% similarity]
sys/platform/vkernel64/x86_64/npx.c [moved from sys/platform/vkernel64/amd64/npx.c with 100% similarity]
sys/platform/vkernel64/x86_64/procfs_machdep.c [moved from sys/platform/vkernel64/amd64/procfs_machdep.c with 100% similarity]
sys/platform/vkernel64/x86_64/swtch.s [moved from sys/platform/vkernel64/amd64/swtch.s with 100% similarity]
sys/platform/vkernel64/x86_64/tls.c [moved from sys/platform/vkernel64/amd64/tls.c with 100% similarity]
sys/platform/vkernel64/x86_64/trap.c [moved from sys/platform/vkernel64/amd64/trap.c with 98% similarity]
sys/platform/vkernel64/x86_64/userldt.c [moved from sys/platform/vkernel64/amd64/userldt.c with 100% similarity]
sys/platform/vkernel64/x86_64/vm_machdep.c [moved from sys/platform/vkernel64/amd64/vm_machdep.c with 100% similarity]
sys/vm/vm_page.c

index 7c90b89..0c218e3 100644 (file)
@@ -45,9 +45,9 @@ void   bzero(void *, size_t);                                 /* LEGACY */
 int     ffs(int) __pure2;
 #endif
 #ifdef __BSD_VISIBLE
-int     ffsl(long) __pure2;
 int     ffsll(long long) __pure2;
 #if !defined(_KERNEL_VIRTUAL)
+int     ffsl(long) __pure2;
 int     fls(int) __pure2;
 int     flsl(long) __pure2;
 #endif
index 778e96e..80d779f 100644 (file)
@@ -630,7 +630,7 @@ kern/imgact_resident.c      standard
 kern/imgact_gzip.c     optional gzip
 kern/imgact_shell.c    standard
 kern/inflate.c         optional gzip
-kern/init_main.c       standard
+kern/init_main.c       standard nowerror
 kern/init_sysent.c     standard
 kern/kern_checkpoint.c standard
 kern/kern_sysref.c     standard
index 34916f2..a4215bd 100644 (file)
@@ -3,8 +3,8 @@
 # $DragonFly: src/sys/config/VKERNEL,v 1.14 2008/11/09 18:57:17 dillon Exp $
 
 platform       vkernel64       # platform architecture (i386, vkernel, etc)
-machine                amd64
-machine_arch   amd64           # cpu architecture (i386, etc)
+machine                x86_64
+machine_arch   x86_64          # cpu architecture (i386, etc)
 ident          VKERNEL64
 maxusers       0
 
index b50869f..199eed0 100644 (file)
@@ -426,10 +426,10 @@ shutdown_busycount2(struct buf *bp, void *info)
                }
 #if defined(SHOW_BUSYBUFS) || defined(DIAGNOSTIC)
                kprintf(
-           "%p dev:?, flags:%08x, loffset:%lld, doffset:%lld\n",
+           "%p dev:?, flags:%08x, loffset:%jd, doffset:%jd\n",
                    bp, 
-                   bp->b_flags, bp->b_loffset,
-                   bp->b_bio2.bio_offset);
+                   bp->b_flags, (intmax_t)bp->b_loffset,
+                   (intmax_t)bp->b_bio2.bio_offset);
 #endif
                return(1);
        }
index 1383d94..67e124a 100644 (file)
@@ -817,7 +817,7 @@ init_machclk(void)
                machclk_freq = 1000000LLU << MACHCLK_SHIFT;
                machclk_per_tick = machclk_freq / hz;
 #ifdef ALTQ_DEBUG
-               kprintf("altq: emulate %lluHz cpu clock\n", machclk_freq);
+               kprintf("altq: emulate %juHz cpu clock\n", (uintmax_t)machclk_freq);
 #endif
                return;
        }
@@ -855,7 +855,7 @@ init_machclk(void)
        machclk_per_tick = machclk_freq / hz;
 
 #ifdef ALTQ_DEBUG
-       kprintf("altq: CPU clock: %lluHz\n", machclk_freq);
+       kprintf("altq: CPU clock: %juHz\n", (uintmax_t)machclk_freq);
 #endif
 }
 
index 4ac434b..a57dd69 100644 (file)
@@ -90,13 +90,12 @@ vfs/smbfs/smbfs_vnops.c             optional        smbfs
 
 cpu/x86_64/misc/atomic.c               standard                        \
         compile-with    "${CC} -c ${CFLAGS} ${WERROR} ${DEFINED_PROF:S/^$/-fomit-frame-pointer/} ${.IMPSRC}"
-platform/pc64/amd64/autoconf.c standard
-platform/pc64/amd64/mpboot.S           optional        smp
-platform/pc64/amd64/mplock.s           optional        smp
+platform/pc64/x86_64/autoconf.c        standard
+platform/pc64/x86_64/mpboot.S          optional        smp
 
 # DDB XXX
-cpu/x86_64/misc/x86_64-gdbstub.c               optional        ddb
-cpu/x86_64/misc/lwbuf.c                                standard
+cpu/x86_64/misc/x86_64-gdbstub.c       optional        ddb
+cpu/x86_64/misc/lwbuf.c                        standard
 cpu/x86_64/misc/elf_machdep.c          standard
 cpu/x86_64/misc/in_cksum2.s            optional        inet
 cpu/x86_64/misc/ktr.c                  optional        ktr
diff --git a/sys/platform/vkernel64/Makefile.inc b/sys/platform/vkernel64/Makefile.inc
new file mode 100644 (file)
index 0000000..669994c
--- /dev/null
@@ -0,0 +1,4 @@
+# Used by the device build to check for device support
+#
+
+DEV_SUPPORT=   virtual disk/ccd disk/md disk/vn
diff --git a/sys/platform/vkernel64/amd64/mplock.s b/sys/platform/vkernel64/amd64/mplock.s
deleted file mode 100644 (file)
index 7bd0f5c..0000000
+++ /dev/null
@@ -1,283 +0,0 @@
-/*
- * $FreeBSD: src/sys/i386/i386/mplock.s,v 1.29.2.2 2000/05/16 06:58:06 dillon Exp $
- * $DragonFly: src/sys/platform/pc32/i386/mplock.s,v 1.21 2006/11/07 06:43:24 dillon Exp $
- *
- * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
- *
- * This code is derived from software contributed to The DragonFly Project
- * by Matthew Dillon <dillon@backplane.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- * 3. Neither the name of The DragonFly Project nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific, prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
- * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
- * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
- * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
- * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- *                             DragonFly MPLOCK operation
- *
- * Each thread has an MP lock count, td_mpcount, and there is a shared
- * global called mp_lock.  mp_lock is the physical MP lock and contains either
- * -1 or the cpuid of the cpu owning the lock.  The count is *NOT* integrated
- * into mp_lock but instead resides in each thread td_mpcount.
- *
- * When obtaining or releasing the MP lock the td_mpcount is PREDISPOSED
- * to the desired count *PRIOR* to operating on the mp_lock itself.  MP
- * lock operations can occur outside a critical section with interrupts
- * enabled with the provisio (which the routines below handle) that an
- * interrupt may come along and preempt us, racing our cmpxchgl instruction
- * to perform the operation we have requested by pre-disposing td_mpcount.
- *
- * Additionally, the LWKT threading system manages the MP lock and
- * lwkt_switch(), in particular, may be called after pre-disposing td_mpcount
- * to handle 'blocking' on the MP lock.
- *
- *
- * Recoded from the FreeBSD original:
- * ----------------------------------------------------------------------------
- * "THE BEER-WARE LICENSE" (Revision 42):
- * <phk@FreeBSD.org> wrote this file.  As long as you retain this notice you
- * can do whatever you want with this stuff. If we meet some day, and you think
- * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
- * ----------------------------------------------------------------------------
- */
-
-#include <machine/asmacros.h>
-#if 0
-#include <machine_base/apic/apicreg.h>
-#endif
-
-#include "assym.s"
-
-/*
- * YYY Debugging only.  Define this to be paranoid about invalidating the
- * TLB when we get giant.
- */
-#undef PARANOID_INVLTLB
-
-       .data
-       ALIGN_DATA
-#ifdef SMP
-       .globl  mp_lock
-mp_lock:
-       .long   -1                      /* initialized to not held */
-#endif
-
-       .text
-       SUPERALIGN_TEXT
-
-       /*
-        * Note on cmpxchgl... exchanges ecx with mem if mem matches eax.
-        * Z=1 (jz) on success.   A lock prefix is required for MP.
-        */
-NON_GPROF_ENTRY(cpu_get_initial_mplock)
-       movq    PCPU(curthread),%rcx
-       movl    $1,TD_MPCOUNT(%rcx)     /* curthread has mpcount of 1 */
-       movl    $0,mp_lock              /* owned by cpu 0 */
-       NON_GPROF_RET
-
-       /*
-        * cpu_try_mplock() returns non-zero on success, 0 on failure.  It
-        * only adjusts mp_lock, it does not touch td_mpcount.  Callers
-        * should always increment td_mpcount *before* trying to acquire
-        * the actual lock, predisposing td_mpcount to the desired state of
-        * the lock.
-        *
-        * NOTE! Only call cpu_try_mplock() inside a critical section.  If
-        * you don't an interrupt can come along and get and release
-        * the lock before our cmpxchgl instruction, causing us to fail
-        * but resulting in the lock being held by our cpu.
-        */
-NON_GPROF_ENTRY(cpu_try_mplock)
-       movl    PCPU(cpuid),%ecx
-       movl    $-1,%eax
-       lock cmpxchgl %ecx,mp_lock      /* ecx<->mem if eax matches */
-       jnz     1f
-#ifdef PARANOID_INVLTLB
-       movq    %cr3,%rax; movq %rax,%cr3       /* YYY check and remove */
-#endif
-       movl    $1,%eax
-       NON_GPROF_RET
-1:
-       subl    %eax,%eax
-       NON_GPROF_RET
-
-       /*
-        * get_mplock() Obtains the MP lock and may switch away if it cannot
-        * get it.  This routine may be called WITHOUT a critical section
-        * and with cpu interrupts enabled.
-        *
-        * To handle races in a sane fashion we predispose TD_MPCOUNT,
-        * which prevents us from losing the lock in a race if we already
-        * have it or happen to get it.  It also means that we might get
-        * the lock in an interrupt race before we have a chance to execute
-        * our cmpxchgl instruction, so we have to handle that case.
-        * Fortunately simply calling lwkt_switch() handles the situation
-        * for us and also 'blocks' us until the MP lock can be obtained.
-        */
-NON_GPROF_ENTRY(get_mplock)
-       movl    PCPU(cpuid),%ecx
-       movq    PCPU(curthread),%rdx
-       incl    TD_MPCOUNT(%rdx)        /* predispose */
-       cmpl    %ecx,mp_lock
-       jne     1f
-       NON_GPROF_RET                   /* success! */
-
-       /*
-        * We don't already own the mp_lock, use cmpxchgl to try to get
-        * it.
-        */
-1:
-       movl    $-1,%eax
-       lock cmpxchgl %ecx,mp_lock
-       jnz     2f
-       NON_GPROF_RET                   /* success */
-
-       /*
-        * Failure, but we could end up owning mp_lock anyway due to
-        * an interrupt race.  lwkt_switch() will clean up the mess
-        * and 'block' until the mp_lock is obtained.
-        *
-        * Create a stack frame for the call so KTR logs the stack
-        * backtrace properly.
-        */
-2:
-       pushq   %rbp
-       movq    %rsp,%rbp
-       call    lwkt_mp_lock_contested
-       popq    %rbp
-#ifdef INVARIANTS
-       movl    PCPU(cpuid),%eax        /* failure */
-       cmpl    %eax,mp_lock
-       jne     4f
-#endif
-       NON_GPROF_RET
-#ifdef INVARIANTS
-4:
-       cmpl    $0,panicstr             /* don't double panic */
-       je      badmp_get2
-       NON_GPROF_RET
-#endif
-
-       /*
-        * try_mplock() attempts to obtain the MP lock.  1 is returned on
-        * success, 0 on failure.  We do not have to be in a critical section
-        * and interrupts are almost certainly enabled.
-        *
-        * We must pre-dispose TD_MPCOUNT in order to deal with races in
-        * a reasonable way.
-        *
-        */
-NON_GPROF_ENTRY(try_mplock)
-       movl    PCPU(cpuid),%ecx
-       movq    PCPU(curthread),%rdx
-       incl    TD_MPCOUNT(%rdx)                /* pre-dispose for race */
-       cmpl    %ecx,mp_lock
-       je      1f                              /* trivial success */
-       movl    $-1,%eax
-       lock cmpxchgl %ecx,mp_lock
-       jnz     2f
-       /*
-        * Success
-        */
-#ifdef PARANOID_INVLTLB
-       movq    %cr3,%rax; movq %rax,%cr3       /* YYY check and remove */
-#endif
-1:
-       movl    $1,%eax                         /* success (cmpxchgl good!) */
-       NON_GPROF_RET
-
-       /*
-        * The cmpxchgl failed but we might have raced.  Undo the mess by
-        * predispoing TD_MPCOUNT and then checking.  If TD_MPCOUNT is
-        * still non-zero we don't care what state the lock is in (since
-        * we obviously didn't own it above), just return failure even if
-        * we won the lock in an interrupt race.  If TD_MPCOUNT is zero
-        * make sure we don't own the lock in case we did win it in a race.
-        */
-2:
-       decl    TD_MPCOUNT(%rdx)
-       cmpl    $0,TD_MPCOUNT(%rdx)
-       jne     3f
-       movl    PCPU(cpuid),%eax
-       movl    $-1,%ecx
-       lock cmpxchgl %ecx,mp_lock
-3:
-       subl    %eax,%eax
-       NON_GPROF_RET
-
-       /*
-        * rel_mplock() releases a previously obtained MP lock.
-        *
-        * In order to release the MP lock we pre-dispose TD_MPCOUNT for
-        * the release and basically repeat the release portion of try_mplock
-        * above.
-        */
-NON_GPROF_ENTRY(rel_mplock)
-       movq    PCPU(curthread),%rdx
-       movl    TD_MPCOUNT(%rdx),%eax
-#ifdef INVARIANTS
-       cmpl    $0,%eax
-       je      badmp_rel
-#endif
-       subl    $1,%eax
-       movl    %eax,TD_MPCOUNT(%rdx)
-       cmpl    $0,%eax
-       jne     3f
-       movl    PCPU(cpuid),%eax
-       movl    $-1,%ecx
-       lock cmpxchgl %ecx,mp_lock
-       movl    mp_lock_contention_mask,%eax
-       cmpl    $0,%eax
-       je      3f
-       call    lwkt_mp_lock_uncontested
-3:
-       NON_GPROF_RET
-
-#ifdef INVARIANTS
-
-badmp_get:
-       movq    $bmpsw1,%rdi
-       movl    $0,%eax
-       call    panic
-badmp_get2:
-       movq    $bmpsw1a,%rdi
-       movl    $0,%eax
-       call    panic
-badmp_rel:
-       movq    $bmpsw2,%rdi
-       movl    $0,%eax
-       call    panic
-
-       .data
-
-bmpsw1:
-       .asciz  "try/get_mplock(): already have lock! %d %p"
-
-bmpsw1a:
-       .asciz  "try/get_mplock(): failed on count or switch %d %p"
-
-bmpsw2:
-       .asciz  "rel_mplock(): mpcount already 0 @ %p %p %p %p %p %p %p %p!"
-
-#endif
index b96e7fc..69ab38a 100644 (file)
@@ -11,19 +11,19 @@ vfs/smbfs/smbfs_smb.c               optional        smbfs
 vfs/smbfs/smbfs_subr.c         optional        smbfs
 vfs/smbfs/smbfs_vfsops.c       optional        smbfs
 vfs/smbfs/smbfs_vnops.c                optional        smbfs
-cpu/amd64/misc/atomic.c                standard                                \
+cpu/x86_64/misc/atomic.c       standard                                \
        compile-with    "${CC} -c ${CFLAGS} ${DEFINED_PROF:S/^$/-fomit-frame-pointer/} ${.IMPSRC}"
-platform/vkernel64/amd64/autoconf.c    standard
-platform/vkernel64/amd64/mp.c          optional        smp             \
+platform/vkernel64/x86_64/autoconf.c   standard
+platform/vkernel64/x86_64/mp.c         optional        smp             \
        compile-with    "${CC} -c -pthread ${CFLAGS} -I/usr/include ${.IMPSRC}"
-platform/vkernel64/amd64/mplock.s              optional        smp
 #
 # DDB XXX
-cpu/amd64/misc/elf_machdep.c           standard
-cpu/amd64/misc/in_cksum2.s             optional        inet
-cpu/amd64/misc/ktr.c                   optional        ktr
-cpu/amd64/misc/db_disasm.c             optional        ddb
-cpu/amd64/misc/amd64-gdbstub.c         optional        ddb
+cpu/x86_64/misc/elf_machdep.c          standard
+cpu/x86_64/misc/lwbuf.c                        standard
+cpu/x86_64/misc/in_cksum2.s            optional        inet
+cpu/x86_64/misc/ktr.c                  optional        ktr
+cpu/x86_64/misc/db_disasm.c            optional        ddb
+cpu/x86_64/misc/x86_64-gdbstub.c       optional        ddb
 #
 # DOS mbr and gpt
 kern/subr_diskmbr.c                    standard
@@ -37,26 +37,26 @@ dev/virtual/net/if_vke.c            optional        vke
 
 # PLATFORM FILES
 #
-platform/vkernel64/amd64/global.s              standard
-platform/vkernel64/amd64/swtch.s               standard
-platform/vkernel64/amd64/npx.c         standard
-platform/vkernel64/amd64/db_interface.c        optional        ddb
-platform/vkernel64/amd64/db_trace.c    optional        ddb
-platform/vkernel64/amd64/vm_machdep.c  standard
-platform/vkernel64/amd64/cpu_regs.c    standard
-platform/vkernel64/amd64/userldt.c             standard
-platform/vkernel64/amd64/tls.c         standard
-platform/vkernel64/amd64/trap.c                standard
-platform/vkernel64/amd64/exception.c   standard
-platform/vkernel64/amd64/procfs_machdep.c      standard
-platform/vkernel64/amd64/fork_tramp.s  standard
-platform/vkernel64/platform/init.c     standard
+platform/vkernel64/x86_64/global.s             standard
+platform/vkernel64/x86_64/swtch.s              standard
+platform/vkernel64/x86_64/npx.c                standard
+platform/vkernel64/x86_64/db_interface.c       optional        ddb
+platform/vkernel64/x86_64/db_trace.c   optional        ddb
+platform/vkernel64/x86_64/vm_machdep.c standard
+platform/vkernel64/x86_64/cpu_regs.c   standard
+platform/vkernel64/x86_64/userldt.c            standard
+platform/vkernel64/x86_64/tls.c                standard
+platform/vkernel64/x86_64/trap.c       standard
+platform/vkernel64/x86_64/exception.c  standard
+platform/vkernel64/x86_64/procfs_machdep.c     standard
+platform/vkernel64/x86_64/fork_tramp.s standard
+platform/vkernel64/platform/init.c     standard nowerror
 platform/vkernel64/platform/globaldata.c       standard
 platform/vkernel64/platform/kqueue.c   standard
 platform/vkernel64/platform/shutdown.c standard
 platform/vkernel64/platform/machintr.c standard
 platform/vkernel64/platform/copyio.c   standard
-platform/vkernel64/platform/pmap.c     standard
+platform/vkernel64/platform/pmap.c     standard nowerror
 platform/vkernel64/platform/pmap_inval.c       standard
 platform/vkernel64/platform/busdma_machdep.c standard
 platform/vkernel64/platform/sysarch.c  standard
index 113cc15..770eed2 100644 (file)
@@ -75,7 +75,7 @@ struct mdglobaldata {
        struct user_segment_descriptor gd_common_tssd;
        struct user_segment_descriptor *gd_tss_gdt;
        struct thread   *gd_npxthread;
-       struct amd64tss gd_common_tss;
+       struct x86_64tss gd_common_tss;
        union savefpu   gd_savefpu;     /* fast bcopy/zero temp fpu save area */
        int             gd_fpu_lock;    /* fast bcopy/zero cpu lock */
        int             gd_fpending;    /* fast interrupt pending */
index 44e27c7..7c365f0 100644 (file)
 #include <machine/psl.h>
 #endif
 
-/*
- * MP_FREE_LOCK is used by both assembly and C under SMP.
- */
-#ifdef SMP
-#define MP_FREE_LOCK           0xffffffff      /* value of lock when free */
-#endif
-
 #ifndef LOCORE
 
-#if defined(_KERNEL) || defined(_UTHREAD)
-
-/*
- * MP LOCK functions for SMP and UP.  Under UP the MP lock does not exist
- * but we leave a few functions intact as macros for convenience.
- */
-#ifdef SMP
-
-void   get_mplock(void);
-int    try_mplock(void);
-void   rel_mplock(void);
-int    cpu_try_mplock(void);
-void   cpu_get_initial_mplock(void);
-
-extern u_int   mp_lock;
-
-#define MP_LOCK_HELD()   (mp_lock == mycpu->gd_cpuid)
-#define ASSERT_MP_LOCK_HELD(td)   KASSERT(MP_LOCK_HELD(), ("MP_LOCK_HELD(): not held thread %p", td))
-
-static __inline void
-cpu_rel_mplock(void)
-{
-       mp_lock = MP_FREE_LOCK;
-}
-
-static __inline int
-owner_mplock(void)
-{
-       return (mp_lock);
-}
-
-#else
-
-#define get_mplock()
-#define try_mplock()   1
-#define rel_mplock()
-#define owner_mplock() 0       /* always cpu 0 */
-#define MP_LOCK_HELD() (!0)
-#define ASSERT_MP_LOCK_HELD(td)
-
-#endif /* SMP */
-#endif  /* _KERNEL || _UTHREAD */
 #endif /* LOCORE */
 #endif /* !_MACHINE_LOCK_H_ */
index 71effad..4f12f83 100644 (file)
@@ -47,7 +47,7 @@
 
 struct pcb_ext {
        struct  user_segment_descriptor ext_tssd;       /* tss descriptor */
-       struct  amd64tss        ext_tss;        /* per-process amd64tss */
+       struct  x86_64tss       ext_tss;        /* per-process amd64tss */
        caddr_t ext_iomap;              /* i/o permission bitmap */
 };
 
index 99c9a4a..34a3473 100644 (file)
@@ -47,4 +47,6 @@ struct mdproc {
        struct trapframe *md_regs;      /* registers on current frame */
 };
 
+int grow_stack(struct proc *p, u_long sp); /* XXX swildner */
+
 #endif /* !_MACHINE_PROC_H_ */
similarity index 80%
copy from sys/platform/vkernel64/include/proc.h
copy to sys/platform/vkernel64/include/types.h
index 99c9a4a..d76854f 100644 (file)
  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $DragonFly: src/sys/platform/vkernel/include/proc.h,v 1.1 2006/11/07 18:50:07 dillon Exp $
  */
+#ifndef _MACHINE_TYPES_H_
+#define        _MACHINE_TYPES_H_
 
-#ifndef _MACHINE_PROC_H_
-#define        _MACHINE_PROC_H_
+#include <cpu/types.h>
 
-/*
- * When a trap or exception occurs the trap code stores the frame pointer
- * in md_regs so emulation and other code can modify it for the return.
- */
-struct trapframe;
-
-struct mdproc {
-       struct trapframe *md_regs;      /* registers on current frame */
-};
+typedef __uint32_t     intrmask_t;
 
-#endif /* !_MACHINE_PROC_H_ */
+#endif /* !_MACHINE_TYPES_H_ */
index bc5190a..db99c5a 100644 (file)
@@ -379,7 +379,7 @@ vconsgetc(void *private)
                        break;
                if (n < 0 && errno == EINTR)
                        continue;
-               panic("vconsgetc: EOF on console %d %d", n ,errno);
+               panic("vconsgetc: EOF on console %jd %d", (intmax_t)n, errno);
        }
        console_stolen_by_kernel = 0;
        return((int)c);
index 6e7141f..d7d7155 100644 (file)
@@ -36,7 +36,7 @@
 
 #include <sys/types.h>
 #include <sys/systm.h>
-#include <sys/sfbuf.h>
+#include <cpu/lwbuf.h>
 #include <vm/vm_page.h>
 #include <vm/vm_extern.h>
 #include <assert.h>
@@ -44,6 +44,8 @@
 #include <sys/stat.h>
 #include <sys/mman.h>
 
+#include <sys/mplock2.h>
+
 /*
  * A bcopy that works dring low level boot, before FP is working
  */
@@ -126,7 +128,7 @@ int
 copyin(const void *udaddr, void *kaddr, size_t len)
 {
        struct vmspace *vm = curproc->p_vmspace;
-       struct sf_buf *sf;
+       struct lwbuf *lwb;
        vm_page_t m;
        int error;
        size_t n;
@@ -142,14 +144,14 @@ copyin(const void *udaddr, void *kaddr, size_t len)
                n = PAGE_SIZE - ((vm_offset_t)udaddr & PAGE_MASK);
                if (n > len)
                        n = len;
-               sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
-               bcopy((char *)sf_buf_kva(sf)+((vm_offset_t)udaddr & PAGE_MASK),
+               lwb = lwbuf_alloc(m);
+               bcopy((char *)lwbuf_kva(lwb)+((vm_offset_t)udaddr & PAGE_MASK),
                      kaddr, n);
                len -= n;
                udaddr = (const char *)udaddr + n;
                kaddr = (char *)kaddr + n;
                vm_page_unhold(m);
-               sf_buf_free(sf);
+               lwbuf_free(lwb);
        }
        rel_mplock();
        return (error);
@@ -164,7 +166,7 @@ int
 copyout(const void *kaddr, void *udaddr, size_t len)
 {
        struct vmspace *vm = curproc->p_vmspace;
-       struct sf_buf *sf;
+       struct lwbuf *lwb;
        vm_page_t m;
        int error;
        size_t n;
@@ -180,15 +182,15 @@ copyout(const void *kaddr, void *udaddr, size_t len)
                n = PAGE_SIZE - ((vm_offset_t)udaddr & PAGE_MASK);
                if (n > len)
                        n = len;
-               sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
-               bcopy(kaddr, (char *)sf_buf_kva(sf) +
+               lwb = lwbuf_alloc(m);
+               bcopy(kaddr, (char *)lwbuf_kva(lwb) +
                             ((vm_offset_t)udaddr & PAGE_MASK), n);
                len -= n;
                udaddr = (char *)udaddr + n;
                kaddr = (const char *)kaddr + n;
                vm_page_dirty(m);
                vm_page_unhold(m);
-               sf_buf_free(sf);
+               lwbuf_free(lwb);
        }
        rel_mplock();
        return (error);
index 3fd4360..dc81faa 100644 (file)
@@ -128,7 +128,6 @@ static void
 cothread_thread(void *arg)
 {
        cothread_t cotd = arg;
-       int dummy = 0;
 
        cpu_mask_all_signals(); /* XXX remove me? should already be masked */
        /*
index 3065f20..ad2fd1f 100644 (file)
@@ -91,6 +91,8 @@ vm_offset_t KvaEnd;
 vm_offset_t KvaSize;
 vm_offset_t virtual_start;
 vm_offset_t virtual_end;
+vm_offset_t virtual2_start;
+vm_offset_t virtual2_end;
 vm_offset_t kernel_vm_end;
 vm_offset_t crashdumpmap;
 vm_offset_t clean_sva;
index febdbd7..b688e7a 100644 (file)
@@ -192,7 +192,6 @@ static void i386_protection_init (void);
 static __inline void   pmap_clearbit (vm_page_t m, int bit);
 
 static void    pmap_remove_all (vm_page_t m);
-static void    pmap_enter_quick (pmap_t pmap, vm_offset_t va, vm_page_t m);
 static int pmap_remove_pte (struct pmap *pmap, pt_entry_t *ptq,
                                vm_offset_t sva);
 static void pmap_remove_page (struct pmap *pmap, vm_offset_t va);
@@ -805,7 +804,7 @@ pmap_kremove_quick(vm_offset_t va)
  *     specified memory.
  */
 vm_offset_t
-pmap_map(vm_offset_t virt, vm_paddr_t start, vm_paddr_t end, int prot)
+pmap_map(vm_offset_t *virtp, vm_paddr_t start, vm_paddr_t end, int prot)
 {
        return PHYS_TO_DMAP(start);
 }
@@ -2343,7 +2342,7 @@ validate:
  *
  * Currently this routine may only be used on user pmaps, not kernel_pmap.
  */
-static void
+void
 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m)
 {
        pt_entry_t *pte;
@@ -2471,7 +2470,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_prot_t prot,
        if (lp == NULL || pmap != vmspace_pmap(lp->lwp_vmspace))
                return;
 
-       psize = amd64_btop(size);
+       psize = x86_64_btop(size);
 
        if ((object->type != OBJT_VNODE) ||
                ((limit & MAP_PREFAULT_PARTIAL) && (psize > MAX_INIT_PT) &&
@@ -2529,121 +2528,34 @@ pmap_object_init_pt_callback(vm_page_t p, void *data)
                vm_page_busy(p);
                rel_index = p->pindex - info->start_pindex;
                pmap_enter_quick(info->pmap,
-                                info->addr + amd64_ptob(rel_index), p);
+                                info->addr + x86_64_ptob(rel_index), p);
                vm_page_wakeup(p);
        }
        return(0);
 }
 
 /*
- * pmap_prefault provides a quick way of clustering pagefaults into a
- * processes address space.  It is a "cousin" of pmap_object_init_pt,
- * except it runs at page fault time instead of mmap time.
+ * Return TRUE if the pmap is in shape to trivially
+ * pre-fault the specified address.
+ *
+ * Returns FALSE if it would be non-trivial or if a
+ * pte is already loaded into the slot.
  */
-#define PFBAK 4
-#define PFFOR 4
-#define PAGEORDER_SIZE (PFBAK+PFFOR)
-
-static int pmap_prefault_pageorder[] = {
-       -PAGE_SIZE, PAGE_SIZE,
-       -2 * PAGE_SIZE, 2 * PAGE_SIZE,
-       -3 * PAGE_SIZE, 3 * PAGE_SIZE,
-       -4 * PAGE_SIZE, 4 * PAGE_SIZE
-};
-
-void
-pmap_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
+int
+pmap_prefault_ok(pmap_t pmap, vm_offset_t addr)
 {
-       int i;
-       vm_offset_t starta;
-       vm_offset_t addr;
-       vm_pindex_t pindex;
-       vm_page_t m;
-       vm_object_t object;
-       struct lwp *lp;
-
-       /*
-        * We do not currently prefault mappings that use virtual page
-        * tables.  We do not prefault foreign pmaps.
-        */
-       if (entry->maptype == VM_MAPTYPE_VPAGETABLE)
-               return;
-       lp = curthread->td_lwp;
-       if (lp == NULL || (pmap != vmspace_pmap(lp->lwp_vmspace)))
-               return;
-
-       object = entry->object.vm_object;
-
-       starta = addra - PFBAK * PAGE_SIZE;
-       if (starta < entry->start)
-               starta = entry->start;
-       else if (starta > addra)
-               starta = 0;
-
-       /*
-        * critical section protection is required to maintain the
-        * page/object association, interrupts can free pages and remove
-        * them from their objects.
-        */
-       crit_enter();
-       for (i = 0; i < PAGEORDER_SIZE; i++) {
-               vm_object_t lobject;
-               pt_entry_t *pte;
-               pd_entry_t *pde;
-
-               addr = addra + pmap_prefault_pageorder[i];
-               if (addr > addra + (PFFOR * PAGE_SIZE))
-                       addr = 0;
-
-               if (addr < starta || addr >= entry->end)
-                       continue;
-
-               pde = pmap_pde(pmap, addr);
-               if (pde == NULL || *pde == 0)
-                       continue;
-
-               pte = pmap_pde_to_pte(pde, addr);
-               if (*pte)
-                       continue;
-
-               pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
-               lobject = object;
-
-               for (m = vm_page_lookup(lobject, pindex);
-                   (!m && (lobject->type == OBJT_DEFAULT) &&
-                    (lobject->backing_object));
-                   lobject = lobject->backing_object
-               ) {
-                       if (lobject->backing_object_offset & PAGE_MASK)
-                               break;
-                       pindex += (lobject->backing_object_offset >> PAGE_SHIFT);
-                       m = vm_page_lookup(lobject->backing_object, pindex);
-               }
-
-               /*
-                * give-up when a page is not in memory
-                */
-               if (m == NULL)
-                       break;
+       pt_entry_t *pte;
+       pd_entry_t *pde;
 
-               /*
-                * If everything meets the requirements for pmap_enter_quick(),
-                * then enter the page.
-                */
+       pde = pmap_pde(pmap, addr);
+       if (pde == NULL || *pde == 0)
+               return(0);
 
-               if (((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
-                       (m->busy == 0) &&
-                   (m->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
+       pte = pmap_pde_to_pte(pde, addr);
+       if (*pte)
+               return(0);
 
-                       if ((m->queue - m->pc) == PQ_CACHE) {
-                               vm_page_deactivate(m);
-                       }
-                       vm_page_busy(m);
-                       pmap_enter_quick(pmap, addr, m);
-                       vm_page_wakeup(m);
-               }
-       }
-       crit_exit();
+       return(1);
 }
 
 /*
@@ -3069,7 +2981,7 @@ pmap_page_protect(vm_page_t m, vm_prot_t prot)
 vm_paddr_t
 pmap_phys_address(vm_pindex_t ppn)
 {
-       return (amd64_ptob(ppn));
+       return (x86_64_ptob(ppn));
 }
 
 /*
similarity index 99%
rename from sys/platform/vkernel64/amd64/autoconf.c
rename to sys/platform/vkernel64/x86_64/autoconf.c
index 208af00..beb96dd 100644 (file)
@@ -128,8 +128,8 @@ cpu_startup(void *dummy)
        vm_offset_t pager_eva;
 
        kprintf("%s", version);
-       kprintf("real memory = %llu (%lluK bytes)\n",
-               ptoa(Maxmem), ptoa(Maxmem) / 1024);
+       kprintf("real memory = %ju (%juK bytes)\n",
+           (uintmax_t)ptoa(Maxmem), (uintmax_t)(ptoa(Maxmem) / 1024));
 
        if (nbuf == 0) {
                int factor = 4 * BKVASIZE / 1024;
similarity index 99%
rename from sys/platform/vkernel64/amd64/cpu_regs.c
rename to sys/platform/vkernel64/x86_64/cpu_regs.c
index d4ea2a1..36c62e1 100644 (file)
@@ -82,6 +82,7 @@
 #include <vm/vm_extern.h>
 
 #include <sys/thread2.h>
+#include <sys/mplock2.h>
 
 #include <sys/user.h>
 #include <sys/exec.h>
similarity index 99%
rename from sys/platform/vkernel64/amd64/db_interface.c
rename to sys/platform/vkernel64/x86_64/db_interface.c
index c4bca1b..cea7d81 100644 (file)
@@ -100,7 +100,7 @@ static int  db_global_jmpbuf_valid;
  *  kdb_trap - field a TRACE or BPT trap
  */
 int
-kdb_trap(int type, int code, struct amd64_saved_state *regs)
+kdb_trap(int type, int code, struct x86_64_saved_state *regs)
 {
        volatile int ddb_mode = !(boothowto & RB_GDB);
 
similarity index 100%
rename from sys/platform/vkernel64/amd64/db_trace.c
rename to sys/platform/vkernel64/x86_64/db_trace.c
index 3de9a40..b86c9c5 100644 (file)
@@ -660,9 +660,9 @@ static
 void
 dl_symbol_values(long callpc, const char **name)
 {
+/*
        Dl_info info;
 
-/*
        if (*name == NULL) {
                if (dladdr((const void *)callpc, &info) != 0) {
                        if (info.dli_saddr <= (const void *)callpc)
similarity index 98%
rename from sys/platform/vkernel64/amd64/genassym.c
rename to sys/platform/vkernel64/x86_64/genassym.c
index 56e34bf..af70e87 100644 (file)
@@ -87,7 +87,7 @@ ASSYM(PCB_RBP, offsetof(struct pcb, pcb_rbp));
 ASSYM(PCB_RSP, offsetof(struct pcb, pcb_rsp));
 ASSYM(PCB_RBX, offsetof(struct pcb, pcb_rbx));
 ASSYM(PCB_RIP, offsetof(struct pcb, pcb_rip));
-ASSYM(TSS_RSP0, offsetof(struct amd64tss, tss_rsp0));
+ASSYM(TSS_RSP0, offsetof(struct x86_64tss, tss_rsp0));
 
 ASSYM(PCB_DR0, offsetof(struct pcb, pcb_dr0));
 ASSYM(PCB_DR1, offsetof(struct pcb, pcb_dr1));
similarity index 99%
rename from sys/platform/vkernel64/amd64/mp.c
rename to sys/platform/vkernel64/x86_64/mp.c
index 155647f..1e74e47 100644 (file)
@@ -46,6 +46,8 @@
 #include <vm/vm_object.h>
 #include <vm/vm_page.h>
 
+#include <sys/mplock2.h>
+
 #include <machine/cpu.h>
 #include <machine/cpufunc.h>
 #include <machine/globaldata.h>
similarity index 98%
rename from sys/platform/vkernel64/amd64/trap.c
rename to sys/platform/vkernel64/x86_64/trap.c
index 2d266a2..628033e 100644 (file)
 #include <machine/tss.h>
 #include <machine/globaldata.h>
 
-
 #include <ddb/ddb.h>
+
 #include <sys/msgport2.h>
 #include <sys/thread2.h>
+#include <sys/mplock2.h>
 
 #ifdef SMP
 
@@ -953,7 +954,7 @@ trap_fatal(struct trapframe *frame, int usermode, vm_offset_t eva)
        kprintf("cpuid = %d\n", mycpu->gd_cpuid);
 #endif
        if (type == T_PAGEFLT) {
-               kprintf("fault virtual address  = 0x%x\n", eva);
+               kprintf("fault virtual address  = %p\n", (void *)eva);
                kprintf("fault code             = %s %s, %s\n",
                        usermode ? "user" : "supervisor",
                        code & PGEX_W ? "write" : "read",
@@ -983,7 +984,7 @@ trap_fatal(struct trapframe *frame, int usermode, vm_offset_t eva)
        if (frame->tf_eflags & PSL_VM)
                kprintf("vm86, ");
 #endif
-       kprintf("IOPL = %d\n", (frame->tf_rflags & PSL_IOPL) >> 12);
+       kprintf("IOPL = %jd\n", (intmax_t)((frame->tf_rflags & PSL_IOPL) >> 12));
        kprintf("current process                = ");
        if (curproc) {
                kprintf("%lu (%s)\n",
@@ -1036,7 +1037,9 @@ trap_fatal(struct trapframe *frame, int usermode, vm_offset_t eva)
 void
 dblfault_handler(void)
 {
+#if JG
        struct mdglobaldata *gd = mdcpu;
+#endif
 
        kprintf("\nFatal double fault:\n");
 #if JG
@@ -1161,10 +1164,7 @@ syscall2(struct trapframe *frame)
         * call.  The current frame is copied out to the virtual kernel.
         */
        if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
-               error = vkernel_trap(lp, frame);
-               frame->tf_rax = error;
-               if (error)
-                       frame->tf_rflags |= PSL_C;
+               vkernel_trap(lp, frame);
                error = EJUSTRETURN;
                goto out;
        }
@@ -1255,16 +1255,10 @@ syscall2(struct trapframe *frame)
 
        STOPEVENT(p, S_SCE, narg);      /* MP aware */
 
-#ifdef SMP
        /*
-        * Try to run the syscall without the MP lock if the syscall
-        * is MP safe.  We have to obtain the MP lock no matter what if
-        * we are ktracing
+        * NOTE: All system calls run MPSAFE now.  The system call itself
+        *       is responsible for getting the MP lock.
         */
-       if ((callp->sy_narg & SYF_MPSAFE) == 0)
-               MAKEMPSAFE(have_mplock);
-#endif
-
        error = (*callp->sy_call)(&args);
 
 #if 0
@@ -1465,7 +1459,7 @@ go_user(struct intrframe *frame)
 #endif
                if (r < 0) {
                        if (errno != EINTR)
-                               panic("vmspace_ctl failed");
+                               panic("vmspace_ctl failed error %d", errno);
                } else {
                        if (tf->tf_trapno) {
                                user_trap(tf);
@@ -1499,3 +1493,17 @@ set_vkernel_fp(struct trapframe *frame)
                td->td_pcb->pcb_flags &= ~FP_VIRTFP;
        }
 }
+
+/*
+ * Called from vkernel_trap() to fixup the vkernel's syscall
+ * frame for vmspace_ctl() return.
+ */
+void
+cpu_vkernel_trap(struct trapframe *frame, int error)
+{
+       frame->tf_rax = error;
+       if (error)
+               frame->tf_rflags |= PSL_C;
+       else
+               frame->tf_rflags &= ~PSL_C;
+}
index 2a20a23..b360147 100644 (file)
@@ -282,7 +282,7 @@ vm_page_startup(vm_offset_t vaddr)
            VM_PROT_READ | VM_PROT_WRITE);
        vm_page_array = (vm_page_t)mapped;
 
-#ifdef __x86_64__
+#if defined(__x86_64__) && !defined(_KERNEL_VIRTUAL)
        /*
         * since pmap_map on amd64 returns stuff out of a direct-map region,
         * we have to manually add these pages to the minidump tracking so