more header changes for amd64 port; the pc64 building infrastructure
author Yonghong Yan <yanyh@dragonflybsd.org>
Sun, 23 Sep 2007 04:29:31 +0000 (04:29 +0000)
committer Yonghong Yan <yanyh@dragonflybsd.org>
Sun, 23 Sep 2007 04:29:31 +0000 (04:29 +0000)
and dummy sources as the initial skeleton.

65 files changed:
sys/cpu/amd64/include/asmacros.h [new file with mode: 0644]
sys/cpu/amd64/include/atomic.h
sys/cpu/amd64/include/bus_dma.h [new file with mode: 0644]
sys/cpu/amd64/include/cpu.h
sys/cpu/amd64/include/cpufunc.h
sys/cpu/amd64/include/cputypes.h [new file with mode: 0644]
sys/cpu/amd64/include/db_machdep.h [new file with mode: 0644]
sys/cpu/amd64/include/elf.h
sys/cpu/amd64/include/frame.h
sys/cpu/amd64/include/limits.h
sys/cpu/amd64/include/npx.h
sys/cpu/amd64/include/param.h
sys/cpu/amd64/include/pmap.h
sys/cpu/amd64/include/psl.h
sys/cpu/amd64/include/reg.h
sys/cpu/amd64/include/segments.h
sys/cpu/amd64/include/sigframe.h [new file with mode: 0644]
sys/cpu/amd64/include/specialreg.h [new file with mode: 0644]
sys/cpu/amd64/include/trap.h
sys/cpu/amd64/include/vframe.h
sys/platform/pc64/Makefile.inc
sys/platform/pc64/amd64/atomic.c [new file with mode: 0644]
sys/platform/pc64/amd64/autoconf.c [new file with mode: 0644]
sys/platform/pc64/amd64/busdma_machdep.c [new file with mode: 0644]
sys/platform/pc64/amd64/console.c [copied from sys/cpu/amd64/include/vframe.h with 78% similarity]
sys/platform/pc64/amd64/cpu_regs.c [new file with mode: 0644]
sys/platform/pc64/amd64/db_disasm.c [new file with mode: 0644]
sys/platform/pc64/amd64/db_interface.c [new file with mode: 0644]
sys/platform/pc64/amd64/db_trace.c [new file with mode: 0644]
sys/platform/pc64/amd64/elf_machdep.c [new file with mode: 0644]
sys/platform/pc64/amd64/exception.c [new file with mode: 0644]
sys/platform/pc64/amd64/fork_tramp.s [new file with mode: 0644]
sys/platform/pc64/amd64/genassym.c [new file with mode: 0644]
sys/platform/pc64/amd64/global.s [new file with mode: 0644]
sys/platform/pc64/amd64/globaldata.c [copied from sys/cpu/amd64/include/vframe.h with 74% similarity]
sys/platform/pc64/amd64/in_cksum2.s [copied from sys/cpu/amd64/include/vframe.h with 53% similarity]
sys/platform/pc64/amd64/init.c [new file with mode: 0644]
sys/platform/pc64/amd64/ipl_funcs.c [new file with mode: 0644]
sys/platform/pc64/amd64/ktr.c [copied from sys/cpu/amd64/include/vframe.h with 53% similarity]
sys/platform/pc64/amd64/locore.s [new file with mode: 0644]
sys/platform/pc64/amd64/machintr.c [copied from sys/cpu/amd64/include/vframe.h with 50% similarity]
sys/platform/pc64/amd64/mp.c [new file with mode: 0644]
sys/platform/pc64/amd64/mplock.s [new file with mode: 0644]
sys/platform/pc64/amd64/npx.c [new file with mode: 0644]
sys/platform/pc64/amd64/pmap.c [new file with mode: 0644]
sys/platform/pc64/amd64/procfs_machdep.c [new file with mode: 0644]
sys/platform/pc64/amd64/sigtramp.s [new file with mode: 0644]
sys/platform/pc64/amd64/support.s [new file with mode: 0644]
sys/platform/pc64/amd64/swtch.s [new file with mode: 0644]
sys/platform/pc64/amd64/sysarch.c [copied from sys/cpu/amd64/include/vframe.h with 77% similarity]
sys/platform/pc64/amd64/systimer.c [copied from sys/cpu/amd64/include/vframe.h with 56% similarity]
sys/platform/pc64/amd64/tls.c [new file with mode: 0644]
sys/platform/pc64/amd64/trap.c [copied from sys/platform/pc64/include/types.h with 62% similarity]
sys/platform/pc64/amd64/userldt.c [copied from sys/cpu/amd64/include/vframe.h with 76% similarity]
sys/platform/pc64/amd64/vm_machdep.c [new file with mode: 0644]
sys/platform/pc64/conf/Makefile [new file with mode: 0644]
sys/platform/pc64/conf/files [new file with mode: 0644]
sys/platform/pc64/conf/kern.mk [new file with mode: 0644]
sys/platform/pc64/conf/ldscript.amd64 [new file with mode: 0644]
sys/platform/pc64/conf/options [new file with mode: 0644]
sys/platform/pc64/include/lock.h
sys/platform/pc64/include/md_var.h
sys/platform/pc64/include/pcb.h
sys/platform/pc64/include/thread.h
sys/platform/pc64/include/types.h

diff --git a/sys/cpu/amd64/include/asmacros.h b/sys/cpu/amd64/include/asmacros.h
new file mode 100644 (file)
index 0000000..6d9f476
--- /dev/null
@@ -0,0 +1,197 @@
+/*-
+ * Copyright (c) 1993 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: src/sys/amd64/include/asmacros.h,v 1.32 2006/10/28 06:04:29 bde Exp $
+ * $DragonFly: src/sys/cpu/amd64/include/asmacros.h,v 1.1 2007/09/23 04:29:30 yanyh Exp $
+ */
+
+#ifndef _CPU_ASMACROS_H_
+#define _CPU_ASMACROS_H_
+
+#include <sys/cdefs.h>
+
+/* XXX too much duplication in various asm*.h's. */
+
+/*
+ * CNAME is used to manage the relationship between symbol names in C
+ * and the equivalent assembly language names.  CNAME is given a name as
+ * it would be used in a C program.  It expands to the equivalent assembly
+ * language name.
+ */
+#define CNAME(csym)            csym
+
+#define ALIGN_DATA     .p2align 3      /* 8 byte alignment, zero filled */
+#ifdef GPROF
+#define ALIGN_TEXT     .p2align 4,0x90 /* 16-byte alignment, nop filled */
+#else
+#define ALIGN_TEXT     .p2align 4,0x90 /* 16-byte alignment, nop filled */
+#endif
+#define SUPERALIGN_TEXT        .p2align 4,0x90 /* 16-byte alignment, nop filled */
+
+#define GEN_ENTRY(name)                ALIGN_TEXT; .globl CNAME(name); \
+                               .type CNAME(name),@function; CNAME(name):
+#define NON_GPROF_ENTRY(name)  GEN_ENTRY(name)
+#define NON_GPROF_RET          .byte 0xc3      /* opcode for `ret' */
+
+#ifdef GPROF
+/*
+ * __mcount is like [.]mcount except that it doesn't require its caller to set
+ * up a frame pointer.  It must be called before pushing anything onto the
+ * stack.  gcc should eventually generate code to call __mcount in most
+ * cases.  This would make -pg in combination with -fomit-frame-pointer
+ * useful.  gcc has a configuration variable PROFILE_BEFORE_PROLOGUE to
+ * allow profiling before setting up the frame pointer, but this is
+ * inadequate for good handling of special cases, e.g., -fpic works best
+ * with profiling after the prologue.
+ *
+ * [.]mexitcount is a new function to support non-statistical profiling if an
+ * accurate clock is available.  For C sources, calls to it are generated
+ * by the FreeBSD extension `-mprofiler-epilogue' to gcc.  It is best to
+ * call [.]mexitcount at the end of a function like the MEXITCOUNT macro does,
+ * but gcc currently generates calls to it at the start of the epilogue to
+ * avoid problems with -fpic.
+ *
+ * [.]mcount and __mcount may clobber the call-used registers and %ef.
+ * [.]mexitcount may clobber %ecx and %ef.
+ *
+ * Cross-jumping makes non-statistical profiling timing more complicated.
+ * It is handled in many cases by calling [.]mexitcount before jumping.  It
+ * is handled for conditional jumps using CROSSJUMP() and CROSSJUMP_LABEL().
+ * It is handled for some fault-handling jumps by not sharing the exit
+ * routine.
+ *
+ * ALTENTRY() must be before a corresponding ENTRY() so that it can jump to
+ * the main entry point.  Note that alt entries are counted twice.  They
+ * have to be counted as ordinary entries for gprof to get the call times
+ * right for the ordinary entries.
+ *
+ * High local labels are used in macros to avoid clashes with local labels
+ * in functions.
+ *
+ * Ordinary `ret' is used instead of a macro `RET' because there are a lot
+ * of `ret's.  0xc3 is the opcode for `ret' (`#define ret ... ret' can't
+ * be used because this file is sometimes preprocessed in traditional mode).
+ * `ret' clobbers eflags but this doesn't matter.
+ */
+#define ALTENTRY(name)         GEN_ENTRY(name) ; MCOUNT ; MEXITCOUNT ; jmp 9f
+#define        CROSSJUMP(jtrue, label, jfalse) \
+       jfalse 8f; MEXITCOUNT; jmp __CONCAT(to,label); 8:
+#define CROSSJUMPTARGET(label) \
+       ALIGN_TEXT; __CONCAT(to,label): ; MCOUNT; jmp label
+#define ENTRY(name)            GEN_ENTRY(name) ; 9: ; MCOUNT
+#define FAKE_MCOUNT(caller)    pushq caller ; call __mcount ; popq %rcx
+#define MCOUNT                 call __mcount
+#define MCOUNT_LABEL(name)     GEN_ENTRY(name) ; nop ; ALIGN_TEXT
+#ifdef GUPROF
+#define MEXITCOUNT             call .mexitcount
+#define ret                    MEXITCOUNT ; NON_GPROF_RET
+#else
+#define MEXITCOUNT
+#endif
+
+#else /* !GPROF */
+/*
+ * ALTENTRY() has to align because it is before a corresponding ENTRY().
+ * ENTRY() has to align too because there may be no ALTENTRY() before it.
+ * If there is a previous ALTENTRY() then the alignment code for ENTRY()
+ * is empty.
+ */
+#define ALTENTRY(name)         GEN_ENTRY(name)
+#define        CROSSJUMP(jtrue, label, jfalse) jtrue label
+#define        CROSSJUMPTARGET(label)
+#define ENTRY(name)            GEN_ENTRY(name)
+#define FAKE_MCOUNT(caller)
+#define MCOUNT
+#define MCOUNT_LABEL(name)
+#define MEXITCOUNT
+#endif /* GPROF */
+
+#ifdef LOCORE
+/*
+ * Convenience macro for declaring interrupt entry points.
+ */
+#define        IDTVEC(name)    ALIGN_TEXT; .globl __CONCAT(X,name); \
+                       .type __CONCAT(X,name),@function; __CONCAT(X,name):
+
+/*
+ * Macros to create and destroy a trap frame.
+ */
+#define PUSH_FRAME                                                     \
+       subq    $TF_RIP,%rsp ;  /* skip dummy tf_err and tf_trapno */   \
+       testb   $SEL_RPL_MASK,TF_CS(%rsp) ; /* come from kernel? */     \
+       jz      1f ;            /* Yes, don't swapgs again */           \
+       swapgs ;                                                        \
+1:     movq    %rdi,TF_RDI(%rsp) ;                                     \
+       movq    %rsi,TF_RSI(%rsp) ;                                     \
+       movq    %rdx,TF_RDX(%rsp) ;                                     \
+       movq    %rcx,TF_RCX(%rsp) ;                                     \
+       movq    %r8,TF_R8(%rsp) ;                                       \
+       movq    %r9,TF_R9(%rsp) ;                                       \
+       movq    %rax,TF_RAX(%rsp) ;                                     \
+       movq    %rbx,TF_RBX(%rsp) ;                                     \
+       movq    %rbp,TF_RBP(%rsp) ;                                     \
+       movq    %r10,TF_R10(%rsp) ;                                     \
+       movq    %r11,TF_R11(%rsp) ;                                     \
+       movq    %r12,TF_R12(%rsp) ;                                     \
+       movq    %r13,TF_R13(%rsp) ;                                     \
+       movq    %r14,TF_R14(%rsp) ;                                     \
+       movq    %r15,TF_R15(%rsp)
+
+#define POP_FRAME                                                      \
+       movq    TF_RDI(%rsp),%rdi ;                                     \
+       movq    TF_RSI(%rsp),%rsi ;                                     \
+       movq    TF_RDX(%rsp),%rdx ;                                     \
+       movq    TF_RCX(%rsp),%rcx ;                                     \
+       movq    TF_R8(%rsp),%r8 ;                                       \
+       movq    TF_R9(%rsp),%r9 ;                                       \
+       movq    TF_RAX(%rsp),%rax ;                                     \
+       movq    TF_RBX(%rsp),%rbx ;                                     \
+       movq    TF_RBP(%rsp),%rbp ;                                     \
+       movq    TF_R10(%rsp),%r10 ;                                     \
+       movq    TF_R11(%rsp),%r11 ;                                     \
+       movq    TF_R12(%rsp),%r12 ;                                     \
+       movq    TF_R13(%rsp),%r13 ;                                     \
+       movq    TF_R14(%rsp),%r14 ;                                     \
+       movq    TF_R15(%rsp),%r15 ;                                     \
+       testb   $SEL_RPL_MASK,TF_CS(%rsp) ; /* come from kernel? */     \
+       jz      1f ;            /* keep kernel GS.base */               \
+       cli ;                                                           \
+       swapgs ;                                                        \
+1:     addq    $TF_RIP,%rsp    /* skip over tf_err, tf_trapno */
+
+/*
+ * Access per-CPU data.
+ */
+#define        PCPU(member)    %gs:gd_ ## member
+#define        PCPU_ADDR(member, reg)                                  \
+       movq %gs:PC_PRVSPACE, reg ;                             \
+       addq $PC_ ## member, reg
+
+#endif /* LOCORE */
+
+#endif /* !_CPU_ASMACROS_H_ */
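The PUSH_FRAME/POP_FRAME macros above build and tear down the machine trap frame that the C-level trap and interrupt handlers consume; the TF_* slots are assembler offsets generated (by genassym.c) from the trapframe declared in cpu/amd64/include/frame.h, which is also touched by this commit. The short sketch below is a hedged illustration only, not part of the commit: it assumes the usual struct trapframe with a tf_cs field and the SEL_RPL_MASK definition from <machine/segments.h>, and shows the C-side equivalent of the "come from kernel?" test that PUSH_FRAME performs on TF_CS(%rsp).

/*
 * Hedged sketch, not from this commit.  Assumes <machine/frame.h> declares
 * struct trapframe with a tf_cs member matching the TF_CS offset used by
 * PUSH_FRAME/POP_FRAME, and that SEL_RPL_MASK comes from <machine/segments.h>.
 */
#include <sys/types.h>
#include <machine/frame.h>
#include <machine/segments.h>

static __inline int
trapframe_from_user(const struct trapframe *tf)
{
	/* Same test PUSH_FRAME applies to TF_CS(%rsp): RPL != 0 means user mode. */
	return ((tf->tf_cs & SEL_RPL_MASK) != 0);
}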
diff --git a/sys/cpu/amd64/include/atomic.h b/sys/cpu/amd64/include/atomic.h
index ad23ce7..33a6367 100644 (file)
  * SUCH DAMAGE.
  *
  * $FreeBSD: src/sys/amd64/include/atomic.h,v 1.32 2003/11/21 03:02:00 peter Exp $
- * $DragonFly: src/sys/cpu/amd64/include/atomic.h,v 1.1 2007/08/21 19:40:24 corecode Exp $
+ * $DragonFly: src/sys/cpu/amd64/include/atomic.h,v 1.2 2007/09/23 04:29:30 yanyh Exp $
  */
 #ifndef _CPU_ATOMIC_H_
 #define _CPU_ATOMIC_H_
 
+#ifndef _SYS_TYPES_H_
+#include <sys/types.h>
+#endif
+
 /*
  * Various simple arithmetic on memory which is atomic in the presence
  * of interrupts and multiple processors.
@@ -66,7 +70,8 @@
  */
 #if defined(KLD_MODULE)
 #define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)                    \
-void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)
+void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v); \
+void atomic_##NAME##_##TYPE##_nonlocked(volatile u_##TYPE *p, u_##TYPE v);
 
 int atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src);
 int atomic_cmpset_long(volatile u_long *dst, u_long exp, u_long src);
@@ -101,12 +106,19 @@ atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
                         : "+m" (*p)                    \
                         : CONS (V));                   \
 }                                                      \
-struct __hack
+static __inline void                                    \
+atomic_##NAME##_##TYPE##_nonlocked(volatile u_##TYPE *p, u_##TYPE v)\
+{                                                       \
+        __asm __volatile(OP                             \
+                         : "+m" (*p)                    \
+                         : CONS (V));                   \
+}
 
 #else /* !__GNUC__ */
 
 #define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)                            \
-extern void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)
+extern void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);  \
+extern void atomic_##NAME##_##TYPE##_nonlocked(volatile u_##TYPE *p, u_##TYPE v);
 
 #endif /* __GNUC__ */
 
@@ -178,7 +190,7 @@ int atomic_intr_cond_try(atomic_intr_t *p);
 void atomic_intr_cond_enter(atomic_intr_t *p, void (*func)(void *), void *arg);
 void atomic_intr_cond_exit(atomic_intr_t *p, void (*func)(void *), void *arg);
 
-#else
+#else /* !KLD_MODULE */
 
 static __inline
 void
@@ -224,7 +236,7 @@ atomic_intr_cond_enter(atomic_intr_t *p, void (*func)(void *), void *arg)
        __asm __volatile(MPLOCKED "incl %0; " \
                         "1: ;" \
                         MPLOCKED "btsl $31,%0; jnc 2f; " \
-                        "movq %2,%rdi; call *%1; " \
+                        "movq %2,%%rdi; call *%1; " \
                         "jmp 1b; " \
                         "2: ;" \
                         : "+m" (*p) \
@@ -269,7 +281,7 @@ atomic_intr_cond_exit(atomic_intr_t *p, void (*func)(void *), void *arg)
        __asm __volatile(MPLOCKED "decl %0; " \
                        MPLOCKED "btrl $31,%0; " \
                        "testl $0x3FFFFFFF,%0; jz 1f; " \
-                        "movq %2,%rdi; call *%1; " \
+                        "movq %2,%%rdi; call *%1; " \
                         "1: ;" \
                         : "+m" (*p) \
                         : "r"(func), "m"(arg) \
@@ -369,7 +381,7 @@ extern void atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)
 
 #endif /* defined(__GNUC__) */
 
-#endif /* KLD_MODULE */
+#endif /* !KLD_MODULE */
 
 ATOMIC_ASM(set,             char,  "orb %b1,%0",  "iq",  v);
 ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
@@ -396,6 +408,10 @@ ATOMIC_STORE_LOAD(short,"cmpxchgw %w0,%1", "xchgw %w1,%0");
 ATOMIC_STORE_LOAD(int, "cmpxchgl %0,%1",  "xchgl %1,%0");
 ATOMIC_STORE_LOAD(long,        "cmpxchgq %0,%1",  "xchgq %1,%0");
 
+#define        atomic_cmpset_32        atomic_cmpset_int
+
+#if 0
+
 #undef ATOMIC_ASM
 #undef ATOMIC_STORE_LOAD
 
@@ -484,7 +500,6 @@ ATOMIC_STORE_LOAD(long,     "cmpxchgq %0,%1",  "xchgq %1,%0");
 #define        atomic_subtract_rel_32  atomic_subtract_rel_int
 #define        atomic_load_acq_32      atomic_load_acq_int
 #define        atomic_store_rel_32     atomic_store_rel_int
-#define        atomic_cmpset_32        atomic_cmpset_int
 #define        atomic_cmpset_acq_32    atomic_cmpset_acq_int
 #define        atomic_cmpset_rel_32    atomic_cmpset_rel_int
 #define        atomic_readandclear_32  atomic_readandclear_int
@@ -580,4 +595,6 @@ extern u_int        atomic_readandclear_int(volatile u_int *);
 #endif /* defined(__GNUC__) */
 
 #endif /* !defined(WANT_FUNCTIONS) */
+#endif /* 0 */
+
 #endif /* ! _CPU_ATOMIC_H_ */
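Besides the ATOMIC_ASM()-generated operations (and the new *_nonlocked variants this commit adds), the header declares atomic_cmpset_int()/atomic_cmpset_long() as the compare-and-set primitives. The loop below is a hedged usage sketch, not taken from this commit: only the atomic_cmpset_int() name and its "return non-zero if *dst still held exp and was replaced by src" semantics are assumed; the clamped-counter helper itself is hypothetical.

/*
 * Hedged sketch, not part of this commit: a classic CAS retry loop built on
 * atomic_cmpset_int(), assumed to be reachable via <machine/atomic.h>.
 */
#include <sys/types.h>
#include <machine/atomic.h>

static __inline void
counter_add_clamped(volatile u_int *ctr, u_int n, u_int limit)
{
	u_int old, new;

	do {
		old = *ctr;
		new = (old + n > limit) ? limit : old + n;
		/* Retry if another cpu changed *ctr between the load and the CAS. */
	} while (atomic_cmpset_int(ctr, old, new) == 0);
}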
diff --git a/sys/cpu/amd64/include/bus_dma.h b/sys/cpu/amd64/include/bus_dma.h
new file mode 100644 (file)
index 0000000..cd03113
--- /dev/null
@@ -0,0 +1,967 @@
+/*-
+ * Copyright (c) 2005 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $DragonFly: src/sys/cpu/amd64/include/bus_dma.h,v 1.1 2007/09/23 04:29:30 yanyh Exp $
+ */
+
+#ifndef _CPU_BUS_DMA_H_
+#define _CPU_BUS_DMA_H_
+
+#include <machine/cpufunc.h>
+
+/*
+ * Bus address and size types
+ */
+
+typedef uint64_t bus_addr_t;
+typedef uint64_t bus_size_t;
+
+typedef uint64_t bus_space_tag_t;
+typedef uint64_t bus_space_handle_t;
+
+#define BUS_SPACE_MAXSIZE_24BIT 0xFFFFFF
+#define BUS_SPACE_MAXSIZE_32BIT        0xFFFFFFFF
+#define BUS_SPACE_MAXSIZE      (64 * 1024) /* Maximum supported size */
+#define BUS_SPACE_MAXADDR_24BIT        0xFFFFFF
+#define BUS_SPACE_MAXADDR_32BIT        0xFFFFFFFF
+#define BUS_SPACE_MAXADDR      BUS_SPACE_MAXADDR_32BIT
+
+#define BUS_SPACE_UNRESTRICTED (~0)
+
+/*
+ * Values for the amd64 bus space tag, not to be used directly by MI code.
+ */
+#define AMD64_BUS_SPACE_IO     0       /* space is i/o space */
+#define AMD64_BUS_SPACE_MEM    1       /* space is mem space */
+
+/*
+ * Map a region of device bus space into CPU virtual address space.
+ */
+
+static __inline int bus_space_map(bus_space_tag_t t, bus_addr_t addr,
+                                 bus_size_t size, int flags,
+                                 bus_space_handle_t *bshp);
+
+static __inline int
+bus_space_map(bus_space_tag_t t __unused, bus_addr_t addr,
+             bus_size_t size __unused, int flags __unused,
+             bus_space_handle_t *bshp)
+{
+
+       *bshp = addr;
+       return (0);
+}
+
+/*
+ * Unmap a region of device bus space.
+ */
+
+static __inline void bus_space_unmap(bus_space_tag_t t, bus_space_handle_t bsh,
+                                    bus_size_t size);
+
+static __inline void
+bus_space_unmap(bus_space_tag_t t __unused, bus_space_handle_t bsh __unused,
+               bus_size_t size __unused)
+{
+}
+
+/*
+ * Get a new handle for a subregion of an already-mapped area of bus space.
+ */
+
+static __inline int bus_space_subregion(bus_space_tag_t t,
+                                       bus_space_handle_t bsh,
+                                       bus_size_t offset, bus_size_t size,
+                                       bus_space_handle_t *nbshp);
+
+static __inline int
+bus_space_subregion(bus_space_tag_t t __unused, bus_space_handle_t bsh,
+                   bus_size_t offset, bus_size_t size __unused,
+                   bus_space_handle_t *nbshp)
+{
+
+       *nbshp = bsh + offset;
+       return (0);
+}
+
+/*
+ * Allocate a region of memory that is accessible to devices in bus space.
+ */
+
+int    bus_space_alloc(bus_space_tag_t t, bus_addr_t rstart,
+                       bus_addr_t rend, bus_size_t size, bus_size_t align,
+                       bus_size_t boundary, int flags, bus_addr_t *addrp,
+                       bus_space_handle_t *bshp);
+
+/*
+ * Free a region of bus space accessible memory.
+ */
+
+static __inline void bus_space_free(bus_space_tag_t t, bus_space_handle_t bsh,
+                                   bus_size_t size);
+
+static __inline void
+bus_space_free(bus_space_tag_t t __unused, bus_space_handle_t bsh __unused,
+              bus_size_t size __unused)
+{
+}
+
+
+/*
+ * Read a 1, 2, 4, or 8 byte quantity from bus space
+ * described by tag/handle/offset.
+ */
+static __inline u_int8_t bus_space_read_1(bus_space_tag_t tag,
+                                         bus_space_handle_t handle,
+                                         bus_size_t offset);
+
+static __inline u_int16_t bus_space_read_2(bus_space_tag_t tag,
+                                          bus_space_handle_t handle,
+                                          bus_size_t offset);
+
+static __inline u_int32_t bus_space_read_4(bus_space_tag_t tag,
+                                          bus_space_handle_t handle,
+                                          bus_size_t offset);
+
+static __inline u_int8_t
+bus_space_read_1(bus_space_tag_t tag, bus_space_handle_t handle,
+                bus_size_t offset)
+{
+
+       if (tag == AMD64_BUS_SPACE_IO)
+               return (inb(handle + offset));
+       return (*(volatile u_int8_t *)(handle + offset));
+}
+
+static __inline u_int16_t
+bus_space_read_2(bus_space_tag_t tag, bus_space_handle_t handle,
+                bus_size_t offset)
+{
+
+       if (tag == AMD64_BUS_SPACE_IO)
+               return (inw(handle + offset));
+       return (*(volatile u_int16_t *)(handle + offset));
+}
+
+static __inline u_int32_t
+bus_space_read_4(bus_space_tag_t tag, bus_space_handle_t handle,
+                bus_size_t offset)
+{
+
+       if (tag == AMD64_BUS_SPACE_IO)
+               return (inl(handle + offset));
+       return (*(volatile u_int32_t *)(handle + offset));
+}
+
+#if 0  /* Cause a link error for bus_space_read_8 */
+#define        bus_space_read_8(t, h, o)       !!! bus_space_read_8 unimplemented !!!
+#endif
+
+/*
+ * Read `count' 1, 2, 4, or 8 byte quantities from bus space
+ * described by tag/handle/offset and copy into buffer provided.
+ */
+static __inline void bus_space_read_multi_1(bus_space_tag_t tag,
+                                           bus_space_handle_t bsh,
+                                           bus_size_t offset, u_int8_t *addr,
+                                           size_t count);
+
+static __inline void bus_space_read_multi_2(bus_space_tag_t tag,
+                                           bus_space_handle_t bsh,
+                                           bus_size_t offset, u_int16_t *addr,
+                                           size_t count);
+
+static __inline void bus_space_read_multi_4(bus_space_tag_t tag,
+                                           bus_space_handle_t bsh,
+                                           bus_size_t offset, u_int32_t *addr,
+                                           size_t count);
+
+static __inline void
+bus_space_read_multi_1(bus_space_tag_t tag, bus_space_handle_t bsh,
+                      bus_size_t offset, u_int8_t *addr, size_t count)
+{
+
+       if (tag == AMD64_BUS_SPACE_IO)
+               insb(bsh + offset, addr, count);
+       else {
+               __asm __volatile("                              \n\
+                       cld                                     \n\
+               1:      movb (%2),%%al                          \n\
+                       stosb                                   \n\
+                       loop 1b"                                :
+                   "=D" (addr), "=c" (count)                   :
+                   "r" (bsh + offset), "0" (addr), "1" (count) :
+                   "%eax", "memory");
+       }
+}
+
+static __inline void
+bus_space_read_multi_2(bus_space_tag_t tag, bus_space_handle_t bsh,
+                      bus_size_t offset, u_int16_t *addr, size_t count)
+{
+
+       if (tag == AMD64_BUS_SPACE_IO)
+               insw(bsh + offset, addr, count);
+       else {
+               __asm __volatile("                              \n\
+                       cld                                     \n\
+               1:      movw (%2),%%ax                          \n\
+                       stosw                                   \n\
+                       loop 1b"                                :
+                   "=D" (addr), "=c" (count)                   :
+                   "r" (bsh + offset), "0" (addr), "1" (count) :
+                   "%eax", "memory");
+       }
+}
+
+static __inline void
+bus_space_read_multi_4(bus_space_tag_t tag, bus_space_handle_t bsh,
+                      bus_size_t offset, u_int32_t *addr, size_t count)
+{
+
+       if (tag == AMD64_BUS_SPACE_IO)
+               insl(bsh + offset, addr, count);
+       else {
+               __asm __volatile("                              \n\
+                       cld                                     \n\
+               1:      movl (%2),%%eax                         \n\
+                       stosl                                   \n\
+                       loop 1b"                                :
+                   "=D" (addr), "=c" (count)                   :
+                   "r" (bsh + offset), "0" (addr), "1" (count) :
+                   "%eax", "memory");
+       }
+}
+
+#if 0  /* Cause a link error for bus_space_read_multi_8 */
+#define        bus_space_read_multi_8  !!! bus_space_read_multi_8 unimplemented !!!
+#endif
+
+/*
+ * Read `count' 1, 2, 4, or 8 byte quantities from bus space
+ * described by tag/handle and starting at `offset' and copy into
+ * buffer provided.
+ */
+static __inline void bus_space_read_region_1(bus_space_tag_t tag,
+                                            bus_space_handle_t bsh,
+                                            bus_size_t offset, u_int8_t *addr,
+                                            size_t count);
+
+static __inline void bus_space_read_region_2(bus_space_tag_t tag,
+                                            bus_space_handle_t bsh,
+                                            bus_size_t offset, u_int16_t *addr,
+                                            size_t count);
+
+static __inline void bus_space_read_region_4(bus_space_tag_t tag,
+                                            bus_space_handle_t bsh,
+                                            bus_size_t offset, u_int32_t *addr,
+                                            size_t count);
+
+
+static __inline void
+bus_space_read_region_1(bus_space_tag_t tag, bus_space_handle_t bsh,
+                       bus_size_t offset, u_int8_t *addr, size_t count)
+{
+
+       if (tag == AMD64_BUS_SPACE_IO) {
+               int _port_ = bsh + offset;
+               __asm __volatile("                              \n\
+                       cld                                     \n\
+               1:      inb %w2,%%al                            \n\
+                       stosb                                   \n\
+                       incl %2                                 \n\
+                       loop 1b"                                :
+                   "=D" (addr), "=c" (count), "=d" (_port_)    :
+                   "0" (addr), "1" (count), "2" (_port_)       :
+                   "%eax", "memory", "cc");
+       } else {
+               bus_space_handle_t _port_ = bsh + offset;
+               __asm __volatile("                              \n\
+                       cld                                     \n\
+                       repne                                   \n\
+                       movsb"                                  :
+                   "=D" (addr), "=c" (count), "=S" (_port_)    :
+                   "0" (addr), "1" (count), "2" (_port_)       :
+                   "memory", "cc");
+       }
+}
+
+static __inline void
+bus_space_read_region_2(bus_space_tag_t tag, bus_space_handle_t bsh,
+                       bus_size_t offset, u_int16_t *addr, size_t count)
+{
+
+       if (tag == AMD64_BUS_SPACE_IO) {
+               int _port_ = bsh + offset;
+               __asm __volatile("                              \n\
+                       cld                                     \n\
+               1:      inw %w2,%%ax                            \n\
+                       stosw                                   \n\
+                       addl $2,%2                              \n\
+                       loop 1b"                                :
+                   "=D" (addr), "=c" (count), "=d" (_port_)    :
+                   "0" (addr), "1" (count), "2" (_port_)       :
+                   "%eax", "memory", "cc");
+       } else {
+               bus_space_handle_t _port_ = bsh + offset;
+               __asm __volatile("                              \n\
+                       cld                                     \n\
+                       repne                                   \n\
+                       movsw"                                  :
+                   "=D" (addr), "=c" (count), "=S" (_port_)    :
+                   "0" (addr), "1" (count), "2" (_port_)       :
+                   "memory", "cc");
+       }
+}
+
+static __inline void
+bus_space_read_region_4(bus_space_tag_t tag, bus_space_handle_t bsh,
+                       bus_size_t offset, u_int32_t *addr, size_t count)
+{
+
+       if (tag == AMD64_BUS_SPACE_IO) {
+               int _port_ = bsh + offset;
+               __asm __volatile("                              \n\
+                       cld                                     \n\
+               1:      inl %w2,%%eax                           \n\
+                       stosl                                   \n\
+                       addl $4,%2                              \n\
+                       loop 1b"                                :
+                   "=D" (addr), "=c" (count), "=d" (_port_)    :
+                   "0" (addr), "1" (count), "2" (_port_)       :
+                   "%eax", "memory", "cc");
+       } else {
+               bus_space_handle_t _port_ = bsh + offset;
+               __asm __volatile("                              \n\
+                       cld                                     \n\
+                       repne                                   \n\
+                       movsl"                                  :
+                   "=D" (addr), "=c" (count), "=S" (_port_)    :
+                   "0" (addr), "1" (count), "2" (_port_)       :
+                   "memory", "cc");
+       }
+}
+
+#if 0  /* Cause a link error for bus_space_read_region_8 */
+#define        bus_space_read_region_8 !!! bus_space_read_region_8 unimplemented !!!
+#endif
+
+/*
+ * Write the 1, 2, 4, or 8 byte value `value' to bus space
+ * described by tag/handle/offset.
+ */
+
+static __inline void bus_space_write_1(bus_space_tag_t tag,
+                                      bus_space_handle_t bsh,
+                                      bus_size_t offset, u_int8_t value);
+
+static __inline void bus_space_write_2(bus_space_tag_t tag,
+                                      bus_space_handle_t bsh,
+                                      bus_size_t offset, u_int16_t value);
+
+static __inline void bus_space_write_4(bus_space_tag_t tag,
+                                      bus_space_handle_t bsh,
+                                      bus_size_t offset, u_int32_t value);
+
+static __inline void
+bus_space_write_1(bus_space_tag_t tag, bus_space_handle_t bsh,
+                      bus_size_t offset, u_int8_t value)
+{
+
+       if (tag == AMD64_BUS_SPACE_IO)
+               outb(bsh + offset, value);
+       else
+               *(volatile u_int8_t *)(bsh + offset) = value;
+}
+
+static __inline void
+bus_space_write_2(bus_space_tag_t tag, bus_space_handle_t bsh,
+                      bus_size_t offset, u_int16_t value)
+{
+
+       if (tag == AMD64_BUS_SPACE_IO)
+               outw(bsh + offset, value);
+       else
+               *(volatile u_int16_t *)(bsh + offset) = value;
+}
+
+static __inline void
+bus_space_write_4(bus_space_tag_t tag, bus_space_handle_t bsh,
+                      bus_size_t offset, u_int32_t value)
+{
+
+       if (tag == AMD64_BUS_SPACE_IO)
+               outl(bsh + offset, value);
+       else
+               *(volatile u_int32_t *)(bsh + offset) = value;
+}
+
+#if 0  /* Cause a link error for bus_space_write_8 */
+#define        bus_space_write_8       !!! bus_space_write_8 not implemented !!!
+#endif
+
+/*
+ * Write `count' 1, 2, 4, or 8 byte quantities from the buffer
+ * provided to bus space described by tag/handle/offset.
+ */
+
+static __inline void bus_space_write_multi_1(bus_space_tag_t tag,
+                                            bus_space_handle_t bsh,
+                                            bus_size_t offset,
+                                            const u_int8_t *addr,
+                                            size_t count);
+static __inline void bus_space_write_multi_2(bus_space_tag_t tag,
+                                            bus_space_handle_t bsh,
+                                            bus_size_t offset,
+                                            const u_int16_t *addr,
+                                            size_t count);
+
+static __inline void bus_space_write_multi_4(bus_space_tag_t tag,
+                                            bus_space_handle_t bsh,
+                                            bus_size_t offset,
+                                            const u_int32_t *addr,
+                                            size_t count);
+
+static __inline void
+bus_space_write_multi_1(bus_space_tag_t tag, bus_space_handle_t bsh,
+                       bus_size_t offset, const u_int8_t *addr, size_t count)
+{
+
+       if (tag == AMD64_BUS_SPACE_IO)
+               outsb(bsh + offset, addr, count);
+       else {
+               __asm __volatile("                              \n\
+                       cld                                     \n\
+               1:      lodsb                                   \n\
+                       movb %%al,(%2)                          \n\
+                       loop 1b"                                :
+                   "=S" (addr), "=c" (count)                   :
+                   "r" (bsh + offset), "0" (addr), "1" (count) :
+                   "%eax", "memory", "cc");
+       }
+}
+
+static __inline void
+bus_space_write_multi_2(bus_space_tag_t tag, bus_space_handle_t bsh,
+                       bus_size_t offset, const u_int16_t *addr, size_t count)
+{
+
+       if (tag == AMD64_BUS_SPACE_IO)
+               outsw(bsh + offset, addr, count);
+       else {
+               __asm __volatile("                              \n\
+                       cld                                     \n\
+               1:      lodsw                                   \n\
+                       movw %%ax,(%2)                          \n\
+                       loop 1b"                                :
+                   "=S" (addr), "=c" (count)                   :
+                   "r" (bsh + offset), "0" (addr), "1" (count) :
+                   "%eax", "memory", "cc");
+       }
+}
+
+static __inline void
+bus_space_write_multi_4(bus_space_tag_t tag, bus_space_handle_t bsh,
+                       bus_size_t offset, const u_int32_t *addr, size_t count)
+{
+
+       if (tag == AMD64_BUS_SPACE_IO)
+               outsl(bsh + offset, addr, count);
+       else {
+               __asm __volatile("                              \n\
+                       cld                                     \n\
+               1:      lodsl                                   \n\
+                       movl %%eax,(%2)                         \n\
+                       loop 1b"                                :
+                   "=S" (addr), "=c" (count)                   :
+                   "r" (bsh + offset), "0" (addr), "1" (count) :
+                   "%eax", "memory", "cc");
+       }
+}
+
+#if 0  /* Cause a link error for bus_space_write_multi_8 */
+#define        bus_space_write_multi_8(t, h, o, a, c)                          \
+                       !!! bus_space_write_multi_8 unimplemented !!!
+#endif
+
+/*
+ * Write `count' 1, 2, 4, or 8 byte quantities from the buffer provided
+ * to bus space described by tag/handle starting at `offset'.
+ */
+
+static __inline void bus_space_write_region_1(bus_space_tag_t tag,
+                                             bus_space_handle_t bsh,
+                                             bus_size_t offset,
+                                             const u_int8_t *addr,
+                                             size_t count);
+static __inline void bus_space_write_region_2(bus_space_tag_t tag,
+                                             bus_space_handle_t bsh,
+                                             bus_size_t offset,
+                                             const u_int16_t *addr,
+                                             size_t count);
+static __inline void bus_space_write_region_4(bus_space_tag_t tag,
+                                             bus_space_handle_t bsh,
+                                             bus_size_t offset,
+                                             const u_int32_t *addr,
+                                             size_t count);
+
+static __inline void
+bus_space_write_region_1(bus_space_tag_t tag, bus_space_handle_t bsh,
+                        bus_size_t offset, const u_int8_t *addr, size_t count)
+{
+
+       if (tag == AMD64_BUS_SPACE_IO) {
+               int _port_ = bsh + offset;
+               __asm __volatile("                              \n\
+                       cld                                     \n\
+               1:      lodsb                                   \n\
+                       outb %%al,%w0                           \n\
+                       incl %0                                 \n\
+                       loop 1b"                                :
+                   "=d" (_port_), "=S" (addr), "=c" (count)    :
+                   "0" (_port_), "1" (addr), "2" (count)       :
+                   "%eax", "memory", "cc");
+       } else {
+               bus_space_handle_t _port_ = bsh + offset;
+               __asm __volatile("                              \n\
+                       cld                                     \n\
+                       repne                                   \n\
+                       movsb"                                  :
+                   "=D" (_port_), "=S" (addr), "=c" (count)    :
+                   "0" (_port_), "1" (addr), "2" (count)       :
+                   "memory", "cc");
+       }
+}
+
+static __inline void
+bus_space_write_region_2(bus_space_tag_t tag, bus_space_handle_t bsh,
+                        bus_size_t offset, const u_int16_t *addr, size_t count)
+{
+
+       if (tag == AMD64_BUS_SPACE_IO) {
+               int _port_ = bsh + offset;
+               __asm __volatile("                              \n\
+                       cld                                     \n\
+               1:      lodsw                                   \n\
+                       outw %%ax,%w0                           \n\
+                       addl $2,%0                              \n\
+                       loop 1b"                                :
+                   "=d" (_port_), "=S" (addr), "=c" (count)    :
+                   "0" (_port_), "1" (addr), "2" (count)       :
+                   "%eax", "memory", "cc");
+       } else {
+               bus_space_handle_t _port_ = bsh + offset;
+               __asm __volatile("                              \n\
+                       cld                                     \n\
+                       repne                                   \n\
+                       movsw"                                  :
+                   "=D" (_port_), "=S" (addr), "=c" (count)    :
+                   "0" (_port_), "1" (addr), "2" (count)       :
+                   "memory", "cc");
+       }
+}
+
+static __inline void
+bus_space_write_region_4(bus_space_tag_t tag, bus_space_handle_t bsh,
+                        bus_size_t offset, const u_int32_t *addr, size_t count)
+{
+
+       if (tag == AMD64_BUS_SPACE_IO) {
+               int _port_ = bsh + offset;
+               __asm __volatile("                              \n\
+                       cld                                     \n\
+               1:      lodsl                                   \n\
+                       outl %%eax,%w0                          \n\
+                       addl $4,%0                              \n\
+                       loop 1b"                                :
+                   "=d" (_port_), "=S" (addr), "=c" (count)    :
+                   "0" (_port_), "1" (addr), "2" (count)       :
+                   "%eax", "memory", "cc");
+       } else {
+               bus_space_handle_t _port_ = bsh + offset;
+               __asm __volatile("                              \n\
+                       cld                                     \n\
+                       repne                                   \n\
+                       movsl"                                  :
+                   "=D" (_port_), "=S" (addr), "=c" (count)    :
+                   "0" (_port_), "1" (addr), "2" (count)       :
+                   "memory", "cc");
+       }
+}
+
+#if 0  /* Cause a link error for bus_space_write_region_8 */
+#define        bus_space_write_region_8                                        \
+                       !!! bus_space_write_region_8 unimplemented !!!
+#endif
+
+/*
+ * Write the 1, 2, 4, or 8 byte value `val' to bus space described
+ * by tag/handle/offset `count' times.
+ */
+
+static __inline void bus_space_set_multi_1(bus_space_tag_t tag,
+                                          bus_space_handle_t bsh,
+                                          bus_size_t offset,
+                                          u_int8_t value, size_t count);
+static __inline void bus_space_set_multi_2(bus_space_tag_t tag,
+                                          bus_space_handle_t bsh,
+                                          bus_size_t offset,
+                                          u_int16_t value, size_t count);
+static __inline void bus_space_set_multi_4(bus_space_tag_t tag,
+                                          bus_space_handle_t bsh,
+                                          bus_size_t offset,
+                                          u_int32_t value, size_t count);
+
+static __inline void
+bus_space_set_multi_1(bus_space_tag_t tag, bus_space_handle_t bsh,
+                     bus_size_t offset, u_int8_t value, size_t count)
+{
+       bus_space_handle_t addr = bsh + offset;
+
+       if (tag == AMD64_BUS_SPACE_IO)
+               while (count--)
+                       outb(addr, value);
+       else
+               while (count--)
+                       *(volatile u_int8_t *)(addr) = value;
+}
+
+static __inline void
+bus_space_set_multi_2(bus_space_tag_t tag, bus_space_handle_t bsh,
+                    bus_size_t offset, u_int16_t value, size_t count)
+{
+       bus_space_handle_t addr = bsh + offset;
+
+       if (tag == AMD64_BUS_SPACE_IO)
+               while (count--)
+                       outw(addr, value);
+       else
+               while (count--)
+                       *(volatile u_int16_t *)(addr) = value;
+}
+
+static __inline void
+bus_space_set_multi_4(bus_space_tag_t tag, bus_space_handle_t bsh,
+                     bus_size_t offset, u_int32_t value, size_t count)
+{
+       bus_space_handle_t addr = bsh + offset;
+
+       if (tag == AMD64_BUS_SPACE_IO)
+               while (count--)
+                       outl(addr, value);
+       else
+               while (count--)
+                       *(volatile u_int32_t *)(addr) = value;
+}
+
+#if 0  /* Cause a link error for bus_space_set_multi_8 */
+#define        bus_space_set_multi_8 !!! bus_space_set_multi_8 unimplemented !!!
+#endif
+
+/*
+ * Write `count' 1, 2, 4, or 8 byte value `val' to bus space described
+ * by tag/handle starting at `offset'.
+ */
+
+static __inline void bus_space_set_region_1(bus_space_tag_t tag,
+                                           bus_space_handle_t bsh,
+                                           bus_size_t offset, u_int8_t value,
+                                           size_t count);
+static __inline void bus_space_set_region_2(bus_space_tag_t tag,
+                                           bus_space_handle_t bsh,
+                                           bus_size_t offset, u_int16_t value,
+                                           size_t count);
+static __inline void bus_space_set_region_4(bus_space_tag_t tag,
+                                           bus_space_handle_t bsh,
+                                           bus_size_t offset, u_int32_t value,
+                                           size_t count);
+
+static __inline void
+bus_space_set_region_1(bus_space_tag_t tag, bus_space_handle_t bsh,
+                      bus_size_t offset, u_int8_t value, size_t count)
+{
+       bus_space_handle_t addr = bsh + offset;
+
+       if (tag == AMD64_BUS_SPACE_IO)
+               for (; count != 0; count--, addr++)
+                       outb(addr, value);
+       else
+               for (; count != 0; count--, addr++)
+                       *(volatile u_int8_t *)(addr) = value;
+}
+
+static __inline void
+bus_space_set_region_2(bus_space_tag_t tag, bus_space_handle_t bsh,
+                      bus_size_t offset, u_int16_t value, size_t count)
+{
+       bus_space_handle_t addr = bsh + offset;
+
+       if (tag == AMD64_BUS_SPACE_IO)
+               for (; count != 0; count--, addr += 2)
+                       outw(addr, value);
+       else
+               for (; count != 0; count--, addr += 2)
+                       *(volatile u_int16_t *)(addr) = value;
+}
+
+static __inline void
+bus_space_set_region_4(bus_space_tag_t tag, bus_space_handle_t bsh,
+                      bus_size_t offset, u_int32_t value, size_t count)
+{
+       bus_space_handle_t addr = bsh + offset;
+
+       if (tag == AMD64_BUS_SPACE_IO)
+               for (; count != 0; count--, addr += 4)
+                       outl(addr, value);
+       else
+               for (; count != 0; count--, addr += 4)
+                       *(volatile u_int32_t *)(addr) = value;
+}
+
+#if 0  /* Cause a link error for bus_space_set_region_8 */
+#define        bus_space_set_region_8  !!! bus_space_set_region_8 unimplemented !!!
+#endif
+
+/*
+ * Copy `count' 1, 2, 4, or 8 byte values from bus space starting
+ * at tag/bsh1/off1 to bus space starting at tag/bsh2/off2.
+ */
+
+static __inline void bus_space_copy_region_1(bus_space_tag_t tag,
+                                            bus_space_handle_t bsh1,
+                                            bus_size_t off1,
+                                            bus_space_handle_t bsh2,
+                                            bus_size_t off2, size_t count);
+
+static __inline void bus_space_copy_region_2(bus_space_tag_t tag,
+                                            bus_space_handle_t bsh1,
+                                            bus_size_t off1,
+                                            bus_space_handle_t bsh2,
+                                            bus_size_t off2, size_t count);
+
+static __inline void bus_space_copy_region_4(bus_space_tag_t tag,
+                                            bus_space_handle_t bsh1,
+                                            bus_size_t off1,
+                                            bus_space_handle_t bsh2,
+                                            bus_size_t off2, size_t count);
+
+static __inline void
+bus_space_copy_region_1(bus_space_tag_t tag, bus_space_handle_t bsh1,
+                       bus_size_t off1, bus_space_handle_t bsh2,
+                       bus_size_t off2, size_t count)
+{
+       bus_space_handle_t addr1 = bsh1 + off1;
+       bus_space_handle_t addr2 = bsh2 + off2;
+
+       if (tag == AMD64_BUS_SPACE_IO) {
+               if (addr1 >= addr2) {
+                       /* src after dest: copy forward */
+                       for (; count != 0; count--, addr1++, addr2++)
+                               outb(addr2, inb(addr1));
+               } else {
+                       /* dest after src: copy backwards */
+                       for (addr1 += (count - 1), addr2 += (count - 1);
+                           count != 0; count--, addr1--, addr2--)
+                               outb(addr2, inb(addr1));
+               }
+       } else {
+               if (addr1 >= addr2) {
+                       /* src after dest: copy forward */
+                       for (; count != 0; count--, addr1++, addr2++)
+                               *(volatile u_int8_t *)(addr2) =
+                                   *(volatile u_int8_t *)(addr1);
+               } else {
+                       /* dest after src: copy backwards */
+                       for (addr1 += (count - 1), addr2 += (count - 1);
+                           count != 0; count--, addr1--, addr2--)
+                               *(volatile u_int8_t *)(addr2) =
+                                   *(volatile u_int8_t *)(addr1);
+               }
+       }
+}
+
+static __inline void
+bus_space_copy_region_2(bus_space_tag_t tag, bus_space_handle_t bsh1,
+                       bus_size_t off1, bus_space_handle_t bsh2,
+                       bus_size_t off2, size_t count)
+{
+       bus_space_handle_t addr1 = bsh1 + off1;
+       bus_space_handle_t addr2 = bsh2 + off2;
+
+       if (tag == AMD64_BUS_SPACE_IO) {
+               if (addr1 >= addr2) {
+                       /* src after dest: copy forward */
+                       for (; count != 0; count--, addr1 += 2, addr2 += 2)
+                               outw(addr2, inw(addr1));
+               } else {
+                       /* dest after src: copy backwards */
+                       for (addr1 += 2 * (count - 1), addr2 += 2 * (count - 1);
+                           count != 0; count--, addr1 -= 2, addr2 -= 2)
+                               outw(addr2, inw(addr1));
+               }
+       } else {
+               if (addr1 >= addr2) {
+                       /* src after dest: copy forward */
+                       for (; count != 0; count--, addr1 += 2, addr2 += 2)
+                               *(volatile u_int16_t *)(addr2) =
+                                   *(volatile u_int16_t *)(addr1);
+               } else {
+                       /* dest after src: copy backwards */
+                       for (addr1 += 2 * (count - 1), addr2 += 2 * (count - 1);
+                           count != 0; count--, addr1 -= 2, addr2 -= 2)
+                               *(volatile u_int16_t *)(addr2) =
+                                   *(volatile u_int16_t *)(addr1);
+               }
+       }
+}
+
+static __inline void
+bus_space_copy_region_4(bus_space_tag_t tag, bus_space_handle_t bsh1,
+                       bus_size_t off1, bus_space_handle_t bsh2,
+                       bus_size_t off2, size_t count)
+{
+       bus_space_handle_t addr1 = bsh1 + off1;
+       bus_space_handle_t addr2 = bsh2 + off2;
+
+       if (tag == AMD64_BUS_SPACE_IO) {
+               if (addr1 >= addr2) {
+                       /* src after dest: copy forward */
+                       for (; count != 0; count--, addr1 += 4, addr2 += 4)
+                               outl(addr2, inl(addr1));
+               } else {
+                       /* dest after src: copy backwards */
+                       for (addr1 += 4 * (count - 1), addr2 += 4 * (count - 1);
+                           count != 0; count--, addr1 -= 4, addr2 -= 4)
+                               outl(addr2, inl(addr1));
+               }
+       } else {
+               if (addr1 >= addr2) {
+                       /* src after dest: copy forward */
+                       for (; count != 0; count--, addr1 += 4, addr2 += 4)
+                               *(volatile u_int32_t *)(addr2) =
+                                   *(volatile u_int32_t *)(addr1);
+               } else {
+                       /* dest after src: copy backwards */
+                       for (addr1 += 4 * (count - 1), addr2 += 4 * (count - 1);
+                           count != 0; count--, addr1 -= 4, addr2 -= 4)
+                               *(volatile u_int32_t *)(addr2) =
+                                   *(volatile u_int32_t *)(addr1);
+               }
+       }
+}
+
+#if 0  /* Cause a link error for bus_space_copy_8 */
+#define        bus_space_copy_region_8 !!! bus_space_copy_region_8 unimplemented !!!
+#endif
+
+/*
+ * Bus read/write barrier methods.
+ *
+ *     void bus_space_barrier(bus_space_tag_t tag, bus_space_handle_t bsh,
+ *                            bus_size_t offset, bus_size_t len, int flags);
+ *
+ *
+ * Note that BUS_SPACE_BARRIER_WRITE doesn't do anything other than
+ * prevent reordering by the compiler; all Intel x86 processors currently
+ * retire operations outside the CPU in program order.
+ */
+#define        BUS_SPACE_BARRIER_READ  0x01            /* force read barrier */
+#define        BUS_SPACE_BARRIER_WRITE 0x02            /* force write barrier */
+
+static __inline void
+bus_space_barrier(bus_space_tag_t tag __unused, bus_space_handle_t bsh __unused,
+                 bus_size_t offset __unused, bus_size_t len __unused, int flags)
+{
+       if (flags & BUS_SPACE_BARRIER_READ)
+               __asm __volatile("lock; addl $0,0(%%rsp)" : : : "memory");
+       else
+               __asm __volatile("" : : : "memory");
+}
+
+/*
+ * Stream accesses are the same as normal accesses on amd64; there are no
+ * supported bus systems with an endianness different from the host one.
+ */
+#define        bus_space_read_stream_1(t, h, o)        bus_space_read_1((t), (h), (o))
+#define        bus_space_read_stream_2(t, h, o)        bus_space_read_2((t), (h), (o))
+#define        bus_space_read_stream_4(t, h, o)        bus_space_read_4((t), (h), (o))
+
+#define        bus_space_read_multi_stream_1(t, h, o, a, c) \
+       bus_space_read_multi_1((t), (h), (o), (a), (c))
+#define        bus_space_read_multi_stream_2(t, h, o, a, c) \
+       bus_space_read_multi_2((t), (h), (o), (a), (c))
+#define        bus_space_read_multi_stream_4(t, h, o, a, c) \
+       bus_space_read_multi_4((t), (h), (o), (a), (c))
+
+#define        bus_space_write_stream_1(t, h, o, v) \
+       bus_space_write_1((t), (h), (o), (v))
+#define        bus_space_write_stream_2(t, h, o, v) \
+       bus_space_write_2((t), (h), (o), (v))
+#define        bus_space_write_stream_4(t, h, o, v) \
+       bus_space_write_4((t), (h), (o), (v))
+
+#define        bus_space_write_multi_stream_1(t, h, o, a, c) \
+       bus_space_write_multi_1((t), (h), (o), (a), (c))
+#define        bus_space_write_multi_stream_2(t, h, o, a, c) \
+       bus_space_write_multi_2((t), (h), (o), (a), (c))
+#define        bus_space_write_multi_stream_4(t, h, o, a, c) \
+       bus_space_write_multi_4((t), (h), (o), (a), (c))
+
+#define        bus_space_set_multi_stream_1(t, h, o, v, c) \
+       bus_space_set_multi_1((t), (h), (o), (v), (c))
+#define        bus_space_set_multi_stream_2(t, h, o, v, c) \
+       bus_space_set_multi_2((t), (h), (o), (v), (c))
+#define        bus_space_set_multi_stream_4(t, h, o, v, c) \
+       bus_space_set_multi_4((t), (h), (o), (v), (c))
+
+#define        bus_space_read_region_stream_1(t, h, o, a, c) \
+       bus_space_read_region_1((t), (h), (o), (a), (c))
+#define        bus_space_read_region_stream_2(t, h, o, a, c) \
+       bus_space_read_region_2((t), (h), (o), (a), (c))
+#define        bus_space_read_region_stream_4(t, h, o, a, c) \
+       bus_space_read_region_4((t), (h), (o), (a), (c))
+
+#define        bus_space_write_region_stream_1(t, h, o, a, c) \
+       bus_space_write_region_1((t), (h), (o), (a), (c))
+#define        bus_space_write_region_stream_2(t, h, o, a, c) \
+       bus_space_write_region_2((t), (h), (o), (a), (c))
+#define        bus_space_write_region_stream_4(t, h, o, a, c) \
+       bus_space_write_region_4((t), (h), (o), (a), (c))
+
+#define        bus_space_set_region_stream_1(t, h, o, v, c) \
+       bus_space_set_region_1((t), (h), (o), (v), (c))
+#define        bus_space_set_region_stream_2(t, h, o, v, c) \
+       bus_space_set_region_2((t), (h), (o), (v), (c))
+#define        bus_space_set_region_stream_4(t, h, o, v, c) \
+       bus_space_set_region_4((t), (h), (o), (v), (c))
+
+#define        bus_space_copy_region_stream_1(t, h1, o1, h2, o2, c) \
+       bus_space_copy_region_1((t), (h1), (o1), (h2), (o2), (c))
+#define        bus_space_copy_region_stream_2(t, h1, o1, h2, o2, c) \
+       bus_space_copy_region_2((t), (h1), (o1), (h2), (o2), (c))
+#define        bus_space_copy_region_stream_4(t, h1, o1, h2, o2, c) \
+       bus_space_copy_region_4((t), (h1), (o1), (h2), (o2), (c))
+
+#endif /* _CPU_BUS_DMA_H_ */
index 43c32fe..c4af70b 100644 (file)
  *
  *     from: @(#)cpu.h 5.4 (Berkeley) 5/9/91
  * $FreeBSD: src/sys/i386/include/cpu.h,v 1.43.2.2 2001/06/15 09:37:57 scottl Exp $
- * $DragonFly: src/sys/cpu/amd64/include/cpu.h,v 1.1 2007/08/21 19:40:24 corecode Exp $
+ * $DragonFly: src/sys/cpu/amd64/include/cpu.h,v 1.2 2007/09/23 04:29:30 yanyh Exp $
  */
 
 #ifndef _CPU_CPU_H_
 #define        _CPU_CPU_H_
 
+#define CLKF_INTR(framep)      (mycpu->gd_intr_nesting_level > 1 || (curthread->td_flags & TDF_INTTHREAD))
+#define CLKF_PC(framep)                ((framep)->if_rip)
+
+/*
+ * Preempt the current process if in interrupt from user mode,
+ * or after the current trap/syscall if in system mode.
+ *
+ * We do not have to use a locked bus cycle but we do have to use an
+ * atomic instruction because an interrupt on the local cpu can modify
+ * the gd_reqflags field.
+ */
+#define need_lwkt_resched()    \
+       atomic_set_int_nonlocked(&mycpu->gd_reqflags, RQF_AST_LWKT_RESCHED)
+#define need_user_resched()    \
+       atomic_set_int_nonlocked(&mycpu->gd_reqflags, RQF_AST_USER_RESCHED)
+#define need_proftick()         \
+       atomic_set_int_nonlocked(&mycpu->gd_reqflags, RQF_AST_OWEUPC)
+#define signotify()    \
+       atomic_set_int_nonlocked(&mycpu->gd_reqflags, RQF_AST_SIGNAL)
+#define sigupcall()             \
+       atomic_set_int_nonlocked(&mycpu->gd_reqflags, RQF_AST_UPCALL)
+#define clear_lwkt_resched()    \
+       atomic_clear_int_nonlocked(&mycpu->gd_reqflags, RQF_AST_LWKT_RESCHED)
+#define clear_user_resched()   \
+       atomic_clear_int_nonlocked(&mycpu->gd_reqflags, RQF_AST_USER_RESCHED)
+#define user_resched_wanted()  \
+       (mycpu->gd_reqflags & RQF_AST_USER_RESCHED)
+#define lwkt_resched_wanted()   \
+       (mycpu->gd_reqflags & RQF_AST_LWKT_RESCHED)
+
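A hedged illustration of how the AST / return-to-user path typically consumes these requests (the surrounding control flow is illustrative, not taken from this commit):

    if (user_resched_wanted()) {
            clear_user_resched();
            /* pick the highest priority user process before returning */
    }
    if (lwkt_resched_wanted()) {
            clear_lwkt_resched();
            lwkt_switch();          /* yield to a higher priority LWKT thread */
    }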
 /*
  * CTL_MACHDEP definitions.
  */
        { "wall_cmos_clock", CTLTYPE_INT }, \
 }
 
+#ifdef _KERNEL
+void   fork_trampoline (void);
+void   generic_lwp_return (struct lwp *, struct trapframe *);
+void   fork_return (struct lwp *, struct trapframe *);
+#endif
+
 #endif /* !_CPU_CPU_H_ */
index 656da22..a0d21f8 100644 (file)
@@ -32,7 +32,7 @@
  * SUCH DAMAGE.
  *
  * $FreeBSD: src/sys/amd64/include/cpufunc.h,v 1.139 2004/01/28 23:53:04 peter Exp $
- * $DragonFly: src/sys/cpu/amd64/include/cpufunc.h,v 1.1 2007/08/21 19:40:24 corecode Exp $
+ * $DragonFly: src/sys/cpu/amd64/include/cpufunc.h,v 1.2 2007/09/23 04:29:30 yanyh Exp $
  */
 
 /*
@@ -69,6 +69,12 @@ breakpoint(void)
        __asm __volatile("int $3");
 }
 
+static __inline void
+cpu_pause(void)
+{
+       __asm __volatile("pause");
+}
+
 static __inline u_int
 bsfl(u_int mask)
 {
@@ -120,11 +126,72 @@ do_cpuid(u_int ax, u_int *p)
 }
 
 static __inline void
-enable_intr(void)
+cpu_enable_intr(void)
 {
        __asm __volatile("sti");
 }
 
+/*
+ * Cpu and compiler memory ordering fence.  mfence ensures strong read and
+ * write ordering.
+ *
+ * A serializing or fence instruction is required here.  A locked bus
+ * cycle on data for which we already own cache mastership is the most
+ * portable.
+ */
+static __inline void
+cpu_mfence(void)
+{
+#ifdef SMP
+       __asm __volatile("lock; addl $0,(%%rsp)" : : : "memory");
+#else
+       __asm __volatile("" : : : "memory");
+#endif
+}
+
+/*
+ * cpu_lfence() ensures strong read ordering for reads issued prior
+ * to the instruction versus reads issued afterward.
+ *
+ * A serializing or fence instruction is required here.  A locked bus
+ * cycle on data for which we already own cache mastership is the most
+ * portable.
+ */
+static __inline void
+cpu_lfence(void)
+{
+#ifdef SMP
+       __asm __volatile("lock; addl $0,(%%rsp)" : : : "memory");
+#else
+       __asm __volatile("" : : : "memory");
+#endif
+}
+
+/*
+ * cpu_sfence() ensures strong write ordering for writes issued prior
+ * to the instruction versus writes issued afterward.  Writes are
+ * ordered on Intel cpus so we do not actually have to do anything.
+ */
+static __inline void
+cpu_sfence(void)
+{
+       __asm __volatile("" : : : "memory");
+}
+
+/*
+ * cpu_ccfence() prevents the compiler from reordering instructions, in
+ * particular stores, relative to the current cpu.  Use cpu_sfence() if
+ * you need to guarantee ordering by both the compiler and by the cpu.
+ *
+ * This also prevents the compiler from caching memory loads into local
+ * variables across the routine.
+ */
+static __inline void
+cpu_ccfence(void)
+{
+       __asm __volatile("" : : : "memory");
+}
+
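A hedged sketch of how the fences above are meant to pair up (shared_payload and shared_ready are illustrative variables): the producer orders its payload store before the flag store, and the consumer orders the flag load before the payload load.

    /* producer */
    shared_payload = value;
    cpu_sfence();                   /* payload visible before the flag */
    shared_ready = 1;

    /* consumer */
    while (shared_ready == 0)
            cpu_pause();
    cpu_lfence();                   /* flag observed before reading the payload */
    value = shared_payload;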
 #ifdef _KERNEL
 
 #define        HAVE_INLINE_FFS
@@ -178,13 +245,6 @@ halt(void)
        __asm __volatile("hlt");
 }
 
-#if __GNUC__ < 2
-
-#define        inb(port)               inbv(port)
-#define        outb(port, data)        outbv(port, data)
-
-#else /* __GNUC >= 2 */
-
 /*
  * The following complications are to get around gcc not having a
  * constraint letter for the range 0..255.  We still put "d" in the
@@ -230,8 +290,6 @@ outbc(u_int port, u_char data)
        __asm __volatile("outb %0,%1" : : "a" (data), "id" ((u_short)(port)));
 }
 
-#endif /* __GNUC <= 2 */
-
 static __inline u_char
 inbv(u_int port)
 {
@@ -296,6 +354,16 @@ inw(u_int port)
        return (data);
 }
 
+static __inline u_int
+loadandclear(volatile u_int *addr)
+{
+       u_int   result;
+
+       __asm __volatile("xorl %0,%0; xchgl %1,%0"
+                       : "=&r" (result) : "m" (*addr));
+       return (result);
+}
+
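A hedged usage sketch for loadandclear() (pending_events and EVENT_TIMER are hypothetical names): the caller takes ownership of every bit set so far in a single atomic exchange.

    u_int events;

    events = loadandclear(&pending_events);
    if (events & EVENT_TIMER)
            handle_timer_event();   /* hypothetical handler */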
 static __inline void
 outbv(u_int port, u_char data)
 {
@@ -720,6 +788,7 @@ intr_restore(register_t rflags)
 #else /* !__GNUC__ */
 
 int    breakpoint(void);
+void   cpu_pause(void);
 u_int  bsfl(u_int mask);
 u_int  bsrl(u_int mask);
 void   cpu_invlpg(u_long addr);
diff --git a/sys/cpu/amd64/include/cputypes.h b/sys/cpu/amd64/include/cputypes.h
new file mode 100644 (file)
index 0000000..2339d30
--- /dev/null
@@ -0,0 +1,52 @@
+/*-
+ * Copyright (c) 1993 Christopher G. Demetriou
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD: src/sys/amd64/include/cputypes.h,v 1.19 2005/01/05 20:17:20 imp Exp $
+ * $DragonFly: src/sys/cpu/amd64/include/cputypes.h,v 1.1 2007/09/23 04:29:30 yanyh Exp $
+ */
+
+#ifndef _CPU_CPUTYPES_H_
+#define        _CPU_CPUTYPES_H_
+
+/*
+ * Classes of processor.
+ */
+#define        CPUCLASS_X86    0       /* X86 */
+#define        CPUCLASS_K8     1       /* K8 AMD64 class */
+
+/*
+ * Kinds of processor.
+ */
+#define        CPU_X86         0       /* Intel */
+#define        CPU_CLAWHAMMER  1       /* AMD Clawhammer */
+#define        CPU_SLEDGEHAMMER 2      /* AMD Sledgehammer */
+
+#ifndef LOCORE
+extern int     cpu;
+extern int     cpu_class;
+#endif
+
+#endif /* !_CPU_CPUTYPES_H_ */
diff --git a/sys/cpu/amd64/include/db_machdep.h b/sys/cpu/amd64/include/db_machdep.h
new file mode 100644 (file)
index 0000000..a64693a
--- /dev/null
@@ -0,0 +1,97 @@
+/*-
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
+ *  School of Computer Science
+ *  Carnegie Mellon University
+ *  Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ *
+ * $FreeBSD: src/sys/amd64/include/db_machdep.h,v 1.22 2005/01/05 20:17:20 imp Exp $
+ * $DragonFly: src/sys/cpu/amd64/include/db_machdep.h,v 1.1 2007/09/23 04:29:30 yanyh Exp $
+ */
+
+#ifndef _CPU_DB_MACHDEP_H_
+#define        _CPU_DB_MACHDEP_H_
+
+#include <cpu/frame.h>
+#include <cpu/trap.h>
+
+#define amd64_saved_state      trapframe
+
+typedef        vm_offset_t     db_addr_t;      /* address - unsigned */
+typedef        long            db_expr_t;      /* expression - signed */
+
+typedef struct amd64_saved_state db_regs_t;
+
+#ifdef _KERNEL
+extern db_regs_t        ddb_regs;       /* register state */
+#define DDB_REGS        (&ddb_regs)
+
+extern db_addr_t       PC_REGS(db_regs_t *regs);
+extern db_addr_t       SP_REGS(db_regs_t *regs);
+extern db_addr_t       BP_REGS(db_regs_t *regs);
+
+#endif
+
+#define        BKPT_INST       0xcc            /* breakpoint instruction */
+#define        BKPT_SIZE       (1)             /* size of breakpoint inst */
+#define        BKPT_SET(inst)  (BKPT_INST)
+
+#define FIXUP_PC_AFTER_BREAK    ddb_regs.tf_rip -= 1;
+
+#define db_clear_single_step(regs)     ((regs)->tf_rflags &= ~PSL_T)
+#define db_set_single_step(regs)       ((regs)->tf_rflags |=  PSL_T)
+
+#define        IS_BREAKPOINT_TRAP(type, code)  ((type) == T_BPTFLT)
+/*
+ * Watchpoints are not supported.  The debug exception type is in %dr6
+ * and not yet in the args to this macro.
+ */
+#define IS_WATCHPOINT_TRAP(type, code) 0
+
+#define        I_CALL          0xe8
+#define        I_CALLI         0xff
+#define        I_RET           0xc3
+#define        I_IRET          0xcf
+
+#define        inst_trap_return(ins)   (((ins)&0xff) == I_IRET)
+#define        inst_return(ins)        (((ins)&0xff) == I_RET)
+#define        inst_call(ins)          (((ins)&0xff) == I_CALL || \
+                                (((ins)&0xff) == I_CALLI && \
+                                 ((ins)&0x3800) == 0x1000))
+#define inst_load(ins)         0
+#define inst_store(ins)                0
+
+/*
+ * There are no interesting addresses below _kstack = 0xefbfe000.  There
+ * are small absolute values for GUPROF, but we don't want to see them.
+ * Treat "negative" addresses below _kstack as non-small to allow for
+ * future reductions of _kstack and to avoid sign extension problems.
+ *
+ * There is one interesting symbol above -db_maxoff = 0xffff0000,
+ * namely _APTD = 0xfffff000.  Accepting this would mess up the
+ * printing of small negative offsets.  The next largest symbol is
+ * _APTmap = 0xffc00000.  Accepting this is OK (unless db_maxoff is
+ * set to >= 0x400000 - (max stack offset)).
+ */
+#define        DB_SMALL_VALUE_MAX      0x7fffffff
+#define        DB_SMALL_VALUE_MIN      (-0x400001)
+
+#endif /* !_CPU_DB_MACHDEP_H_ */
index 180c8b6..9d87acd 100644 (file)
@@ -24,7 +24,7 @@
  * SUCH DAMAGE.
  *
  * $FreeBSD: src/sys/amd64/include/elf.h,v 1.18 2004/08/03 08:21:48 dfr Exp $
- * $DragonFly: src/sys/cpu/amd64/include/elf.h,v 1.1 2007/08/21 19:40:24 corecode Exp $
+ * $DragonFly: src/sys/cpu/amd64/include/elf.h,v 1.2 2007/09/23 04:29:30 yanyh Exp $
  */
 
 #ifndef _CPU_ELF_H_
@@ -146,4 +146,15 @@ __ElfType(Auxinfo);
 #define        ELF_TARG_MACH   EM_X86_64
 #define        ELF_TARG_VER    1
 
+#ifdef _KERNEL
+/*
+ * On the i386 we load the dynamic linker where a userland call
+ * to mmap(0, ...) would put it.  The rationale behind this
+ * calculation is that it leaves room for the heap to grow to
+ * its maximum allowed size.
+ */
+#define ELF_RTLD_ADDR(vmspace) \
+       (round_page((vm_offset_t)(vmspace)->vm_daddr + maxdsiz))
+#endif
+
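As a hedged numeric illustration (the values are made up): with vm_daddr at 0x600000 and maxdsiz set to 512MB, ELF_RTLD_ADDR(vmspace) evaluates to round_page(0x600000 + 0x20000000) = 0x20600000, i.e. the dynamic linker lands just past the highest address the heap is allowed to reach.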
 #endif /* !_CPU_ELF_H_ */
index cef0377..825e68a 100644 (file)
@@ -36,7 +36,7 @@
  *
  *     from: @(#)frame.h       5.2 (Berkeley) 1/18/91
  * $FreeBSD: src/sys/amd64/include/frame.h,v 1.26 2003/11/08 04:39:22 peter Exp $
- * $DragonFly: src/sys/cpu/amd64/include/frame.h,v 1.1 2007/08/21 19:40:24 corecode Exp $
+ * $DragonFly: src/sys/cpu/amd64/include/frame.h,v 1.2 2007/09/23 04:29:30 yanyh Exp $
  */
 
 #ifndef _CPU_FRAME_H_
@@ -87,7 +87,7 @@ struct trapframe {
 /* Interrupt stack frame */
 
 struct intrframe {
-       /* vec */
+       register_t      if_vec; /* vec */
        /* ppl */
        /* fs XXX */
        /* es XXX */
@@ -117,6 +117,7 @@ struct intrframe {
        register_t      if_rflags;
        register_t      if_rsp;
        register_t      if_ss;
+       register_t      if_gs;
 };
 
 int    kdb_trap(int, int, struct trapframe *);
index 8b09251..1608a32 100644 (file)
@@ -32,7 +32,7 @@
  *
  *     @(#)limits.h    8.3 (Berkeley) 1/4/94
  * $FreeBSD: src/sys/i386/include/limits.h,v 1.14.2.2 2000/11/05 09:21:42 obrien Exp $
- * $DragonFly: src/sys/cpu/amd64/include/limits.h,v 1.1 2007/08/21 19:40:24 corecode Exp $
+ * $DragonFly: src/sys/cpu/amd64/include/limits.h,v 1.2 2007/09/23 04:29:30 yanyh Exp $
  */
 
 #ifndef _CPU_LIMITS_H_
@@ -89,6 +89,9 @@
 #if !defined(_POSIX_SOURCE)
 #define        SIZE_T_MAX      ULONG_MAX       /* max value for a size_t */
 
+#define GID_MAX                UINT_MAX        /* max value for a gid_t */
+#define UID_MAX                UINT_MAX        /* max value for a uid_t */
+
 /* Quads and long longs are the same size.  Ensure they stay in sync. */
 #define        UQUAD_MAX       ULLONG_MAX      /* max value for a uquad_t */
 #define        QUAD_MAX        LLONG_MAX       /* max value for a quad_t */
index d1f9f2f..d698eb9 100644 (file)
@@ -35,7 +35,7 @@
  *
  *     from: @(#)npx.h 5.3 (Berkeley) 1/18/91
  * $FreeBSD: src/sys/i386/include/npx.h,v 1.18.2.1 2001/08/15 01:23:52 peter Exp $
- * $DragonFly: src/sys/cpu/amd64/include/npx.h,v 1.1 2007/08/21 19:40:24 corecode Exp $
+ * $DragonFly: src/sys/cpu/amd64/include/npx.h,v 1.2 2007/09/23 04:29:30 yanyh Exp $
  */
 
 /*
@@ -156,6 +156,7 @@ struct trapframe;
 void   npxexit (void);
 void   npxinit (u_short control);
 void   npxsave (union savefpu *addr);
+int    npxdna(struct trapframe *);
 #endif
 
 #endif /* !_CPU_NPX_H_ */
index 0db43cc..ee4da86 100644 (file)
@@ -35,7 +35,7 @@
  *
  *     from: @(#)param.h       5.8 (Berkeley) 6/28/91
  * $FreeBSD: src/sys/i386/include/param.h,v 1.54.2.8 2002/08/31 21:15:55 dillon Exp $
- * $DragonFly: src/sys/cpu/amd64/include/param.h,v 1.2 2007/08/23 06:56:27 corecode Exp $
+ * $DragonFly: src/sys/cpu/amd64/include/param.h,v 1.3 2007/09/23 04:29:30 yanyh Exp $
  */
 
 #ifndef _CPU_PARAM_H_
 #define PML4MASK       (NPML4-1)
 #define NPML4EPG       (PAGE_SIZE/(sizeof (pml4_entry_t)))
 
-
 #define DEV_BSHIFT     9               /* log2(DEV_BSIZE) */
 #define DEV_BSIZE      (1<<DEV_BSHIFT)
 #define DEV_BMASK      (DEV_BSIZE - 1)
index 0d2e82d..5946d82 100644 (file)
@@ -23,7 +23,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $DragonFly: src/sys/cpu/amd64/include/pmap.h,v 1.1 2007/08/21 19:40:24 corecode Exp $
+ * $DragonFly: src/sys/cpu/amd64/include/pmap.h,v 1.2 2007/09/23 04:29:30 yanyh Exp $
  */
 #ifndef _CPU_PMAP_H_
 #define        _CPU_PMAP_H_
@@ -87,6 +87,9 @@
 #define PGEX_W         0x02    /* during a Write cycle */
 #define PGEX_U         0x04    /* access from User mode (UPL) */
 
+#define PGEX_MAILBOX   0x40
+#define PGEX_FPFAULT   0x80
+
 /*
  * User space is limited to one PML4 entry (512GB).  Kernel space is also
  * limited to one PML4 entry.  Other PML4 entries are used to map foreign
@@ -226,9 +229,12 @@ struct pmap_statistics {
 };
 typedef struct pmap_statistics *pmap_statistics_t;
 
+struct vm_object;
+struct vm_page;
+
 struct pmap {
        pd_entry_t              *pm_pdir;       /* KVA of page directory */
-       vm_object_t             pm_pteobj;      /* Container for pte's */
+       struct vm_object        *pm_pteobj;     /* Container for pte's */
        TAILQ_HEAD(,pv_entry)   pm_pvlist;      /* list of mappings in pmap */
        int                     pm_count;       /* reference count */
        cpumask_t               pm_active;      /* active on cpus */
@@ -241,19 +247,19 @@ struct pmap {
 typedef struct pmap    *pmap_t;
 
 #ifdef _KERNEL
-extern pmap_t          kernel_pmap;
+extern struct pmap             kernel_pmap;
 #endif
 
 /*
  * For each vm_page_t, there is a list of all currently valid virtual
- * mappings of that page.  An entry is a pv_entry_t, the list is pv_table.
+ * mappings of that page.  An entry is a pv_entry_t, the list is pv_list
  */
 typedef struct pv_entry {
        pmap_t          pv_pmap;        /* pmap where mapping lies */
        vm_offset_t     pv_va;          /* virtual address for mapping */
        TAILQ_ENTRY(pv_entry)   pv_list;
        TAILQ_ENTRY(pv_entry)   pv_plist;
-       vm_page_t       pv_ptem;        /* VM page for pte */
+       struct vm_page  *pv_ptem;       /* VM page for pte */
 } *pv_entry_t;
 
 #ifdef _KERNEL
@@ -272,17 +278,15 @@ extern vm_paddr_t avail_end;
 extern vm_paddr_t avail_start;
 extern vm_offset_t clean_eva;
 extern vm_offset_t clean_sva;
-extern vm_paddr_t phys_avail[];
 extern char *ptvmmap;          /* poor name! */
 extern vm_offset_t virtual_avail;
-extern vm_offset_t virtual_end;
 
 void   pmap_bootstrap ( vm_paddr_t, vm_paddr_t);
 pmap_t pmap_kernel (void);
 void   *pmap_mapdev (vm_paddr_t, vm_size_t);
 void   pmap_unmapdev (vm_offset_t, vm_size_t);
 unsigned *pmap_pte (pmap_t, vm_offset_t) __pure2;
-vm_page_t pmap_use_pt (pmap_t, vm_offset_t);
+struct vm_page *pmap_use_pt (pmap_t, vm_offset_t);
 #ifdef SMP
 void   pmap_set_opt (void);
 #endif
index d00ba92..eaacaea 100644 (file)
@@ -35,7 +35,7 @@
  *
  *     from: @(#)psl.h 5.2 (Berkeley) 1/18/91
  * $FreeBSD: src/sys/amd64/include/psl.h,v 1.12 2003/05/01 01:05:23 peter Exp $
- * $DragonFly: src/sys/cpu/amd64/include/psl.h,v 1.1 2007/08/21 19:40:24 corecode Exp $
+ * $DragonFly: src/sys/cpu/amd64/include/psl.h,v 1.2 2007/09/23 04:29:30 yanyh Exp $
  */
 
 #ifndef _CPU_PSL_H_
@@ -56,7 +56,7 @@
 #define        PSL_IOPL        0x00003000      /* i/o privilege level */
 #define        PSL_NT          0x00004000      /* nested task bit */
 #define        PSL_RF          0x00010000      /* resume flag bit */
-/* #define PSL_VM      0x00020000 */   /* virtual 8086 mode bit */
+/* #define PSL_VM              0x00020000 */   /* virtual 8086 mode bit */
 #define        PSL_AC          0x00040000      /* alignment checking */
 /* #define PSL_VIF     0x00080000 */   /* virtual interrupt enable */
 /* #define PSL_VIP     0x00100000 */   /* virtual interrupt pending */
index 5a5f87f..58ad151 100644 (file)
@@ -32,7 +32,7 @@
  *
  *     from: @(#)reg.h 5.5 (Berkeley) 1/18/91
  * $FreeBSD: src/sys/amd64/include/reg.h,v 1.35 2004/04/05 23:55:14 imp Exp $
- * $DragonFly: src/sys/cpu/amd64/include/reg.h,v 1.1 2007/08/21 19:40:24 corecode Exp $
+ * $DragonFly: src/sys/cpu/amd64/include/reg.h,v 1.2 2007/09/23 04:29:30 yanyh Exp $
  */
 
 #ifndef _CPU_REG_H_
@@ -98,16 +98,4 @@ struct dbreg {
 #define DBREG_DR7_RDWR      0x03      /* break on read or write */
 #define DBREG_DRX(d,x) ((d)->dr[(x)]) /* reference dr0 - dr15 by
                                          register number */
-#ifdef _KERNEL
-/*
- * XXX these interfaces are MI, so they should be declared in a MI place.
- */
-int    fill_regs(struct thread *, struct reg *);
-int    set_regs(struct thread *, struct reg *);
-int    fill_fpregs(struct thread *, struct fpreg *);
-int    set_fpregs(struct thread *, struct fpreg *);
-int    fill_dbregs(struct thread *, struct dbreg *);
-int    set_dbregs(struct thread *, struct dbreg *);
-#endif
-
 #endif /* !_CPU_REG_H_ */
index 9f0bce5..c1cdfc8 100644 (file)
@@ -36,7 +36,7 @@
  *
  *     from: @(#)segments.h    7.1 (Berkeley) 5/9/91
  * $FreeBSD: src/sys/i386/include/segments.h,v 1.24 1999/12/29 04:33:07 peter Exp $
- * $DragonFly: src/sys/cpu/amd64/include/segments.h,v 1.1 2007/08/21 19:40:24 corecode Exp $
+ * $DragonFly: src/sys/cpu/amd64/include/segments.h,v 1.2 2007/09/23 04:29:30 yanyh Exp $
  */
 
 #ifndef _CPU_SEGMENTS_H_
@@ -60,6 +60,8 @@
 #define        LSEL(s,r)       (((s)<<3) | SEL_LDT | r)        /* a local selector */
 #define        GSEL(s,r)       (((s)<<3) | r)                  /* a global selector */
 
+#ifndef LOCORE
+
 /*
  * Memory and System segment descriptors
  */
@@ -99,7 +101,9 @@ union        descriptor      {
        struct  gate_descriptor gd;
 };
 
-       /* system segments and gate types */
+#endif /* LOCORE */
+
+/* system segments and gate types */
 #define        SDT_SYSNULL      0      /* system null */
 #define        SDT_SYS286TSS    1      /* system 286 TSS available */
 #define        SDT_SYSLDT       2      /* system local descriptor table */
@@ -135,6 +139,9 @@ union       descriptor      {
 #define        SDT_MEMERC      30      /* memory execute read conforming */
 #define        SDT_MEMERAC     31      /* memory execute read accessed conforming */
 
+
+#ifndef LOCORE
+
 /* is memory segment descriptor pointer ? */
 #define ISMEMSDP(s)    ((s->d_type) >= SDT_MEMRO && (s->d_type) <= SDT_MEMERAC)
 
@@ -174,6 +181,7 @@ struct      soft_segment_descriptor {
        unsigned ssd_def32:1 ;          /* default 32 vs 16 bit size */
        unsigned ssd_gran:1 ;           /* limit granularity (byte/page units)*/
 };
+#endif /* 0 */
 
 /*
  * region descriptors, used to load gdt/idt tables before segments yet exist.
@@ -183,7 +191,7 @@ struct region_descriptor {
        unsigned rd_base:32 __attribute__ ((packed));   /* base address  */
 };
 
-#endif /* 0 */
+#endif /* LOCORE */
 
 /*
  * Segment Protection Exception code bits
@@ -220,7 +228,10 @@ struct region_descriptor {
 #define GBIOSDATA_SEL  12      /* BIOS interface (Data) */
 #define GBIOSUTIL_SEL  13      /* BIOS interface (Utility) */
 #define GBIOSARGS_SEL  14      /* BIOS interface (Arguments) */
+#define GTLS_START     15      /* Thread TLS Descriptor */
+#define GTLS_END       17      /* Thread TLS Descriptor */
 
+#define NGTLS                  (GTLS_END - GTLS_START + 1)
 #ifdef BDE_DEBUGGER
 #define        NGDT            18      /* some of 11-17 are reserved for debugger */
 #else
@@ -241,6 +252,11 @@ struct region_descriptor {
 #define LBSDICALLS_SEL 16      /* BSDI system call gate */
 #define NLDT           (LBSDICALLS_SEL + 1)
 
+#ifndef LOCORE
+struct savetls {
+       struct segment_descriptor tls[NGTLS];
+};
+
 #ifdef _KERNEL
 extern int     _default_ldt;
 extern union descriptor gdt[];
@@ -254,5 +270,6 @@ void        sdtossd         (struct segment_descriptor *sdp,
 void   ssdtosd         (struct soft_segment_descriptor *ssdp,
                             struct segment_descriptor *sdp);
 #endif /* _KERNEL */
+#endif /* LOCORE */
 
 #endif /* !_CPU_SEGMENTS_H_ */
diff --git a/sys/cpu/amd64/include/sigframe.h b/sys/cpu/amd64/include/sigframe.h
new file mode 100644 (file)
index 0000000..cc27aa0
--- /dev/null
@@ -0,0 +1,60 @@
+/*-
+ * Copyright (c) 1999 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer 
+ *    in this position and unchanged.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD: src/sys/i386/include/sigframe.h,v 1.5 1999/12/04 10:40:24 marcel Exp $
+ * $DragonFly: src/sys/cpu/amd64/include/sigframe.h,v 1.1 2007/09/23 04:29:30 yanyh Exp $
+ */
+
+#ifndef _CPU_SIGFRAME_H_
+#define        _CPU_SIGFRAME_H_
+
+/*
+ * Signal frames, arguments passed to application signal handlers.
+ */
+
+struct sigframe {
+       /*
+        * The first four members may be used by applications.
+        *
+        * NOTE: The 4th argument is undocumented and poorly commented
+        * on, and seems to be a somewhat de facto BSD "standard".  Handlers
+        * installed with sigvec may be using it.
+        */
+       register_t      sf_signum;
+       register_t      sf_siginfo;     /* code or pointer to sf_si */
+       register_t      sf_ucontext;    /* points to sf_uc */
+       register_t      sf_addr;        /* undocumented 4th arg */
+
+       union {
+               __siginfohandler_t      *sf_action;
+               __sighandler_t          *sf_handler;
+       } sf_ahu;
+       ucontext_t      sf_uc;          /* = *sf_ucontext */
+       siginfo_t       sf_si;          /* = *sf_siginfo (SA_SIGINFO case) */
+};
+
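A hedged userland sketch (not part of this header): a handler installed with SA_SIGINFO receives the first three members above as its arguments; the handler name is illustrative.

    #include <signal.h>

    static void
    demo_handler(int signum, siginfo_t *si, void *ucp)
    {
            /*
             * signum, si and ucp correspond to sf_signum, sf_siginfo
             * (pointing at sf_si) and sf_ucontext (pointing at sf_uc).
             */
            (void)signum;
            (void)si;
            (void)ucp;
    }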
+#endif /* !_CPU_SIGFRAME_H_ */
diff --git a/sys/cpu/amd64/include/specialreg.h b/sys/cpu/amd64/include/specialreg.h
new file mode 100644 (file)
index 0000000..e949e6b
--- /dev/null
@@ -0,0 +1,397 @@
+/*-
+ * Copyright (c) 1991 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *     from: @(#)specialreg.h  7.1 (Berkeley) 5/9/91
+ * $FreeBSD: src/sys/amd64/include/specialreg.h,v 1.39 2007/05/31 11:26:44 des Exp $
+ * $DragonFly: src/sys/cpu/amd64/include/specialreg.h,v 1.1 2007/09/23 04:29:30 yanyh Exp $
+ */
+
+#ifndef _CPU_SPECIALREG_H_
+#define        _CPU_SPECIALREG_H_
+
+/*
+ * Bits in 386 special registers:
+ */
+#define        CR0_PE  0x00000001      /* Protected mode Enable */
+#define        CR0_MP  0x00000002      /* "Math" (fpu) Present */
+#define        CR0_EM  0x00000004      /* EMulate FPU instructions. (trap ESC only) */
+#define        CR0_TS  0x00000008      /* Task Switched (if MP, trap ESC and WAIT) */
+#define        CR0_PG  0x80000000      /* PaGing enable */
+
+/*
+ * Bits in 486 special registers:
+ */
+#define        CR0_NE  0x00000020      /* Numeric Error enable (EX16 vs IRQ13) */
+#define        CR0_WP  0x00010000      /* Write Protect (honor page protect in
+                                                          all modes) */
+#define        CR0_AM  0x00040000      /* Alignment Mask (set to enable AC flag) */
+#define        CR0_NW  0x20000000      /* Not Write-through */
+#define        CR0_CD  0x40000000      /* Cache Disable */
+
+/*
+ * Bits in PPro special registers
+ */
+#define        CR4_VME 0x00000001      /* Virtual 8086 mode extensions */
+#define        CR4_PVI 0x00000002      /* Protected-mode virtual interrupts */
+#define        CR4_TSD 0x00000004      /* Time stamp disable */
+#define        CR4_DE  0x00000008      /* Debugging extensions */
+#define        CR4_PSE 0x00000010      /* Page size extensions */
+#define        CR4_PAE 0x00000020      /* Physical address extension */
+#define        CR4_MCE 0x00000040      /* Machine check enable */
+#define        CR4_PGE 0x00000080      /* Page global enable */
+#define        CR4_PCE 0x00000100      /* Performance monitoring counter enable */
+#define        CR4_FXSR 0x00000200     /* Fast FPU save/restore used by OS */
+#define        CR4_XMM 0x00000400      /* enable SIMD/MMX2 to use except 16 */
+
+/*
+ * Bits in AMD64 special registers.  EFER is 64 bits wide.
+ */
+#define        EFER_SCE 0x000000001    /* System Call Extensions (R/W) */
+#define        EFER_LME 0x000000100    /* Long mode enable (R/W) */
+#define        EFER_LMA 0x000000400    /* Long mode active (R) */
+#define        EFER_NXE 0x000000800    /* PTE No-Execute bit enable (R/W) */
+
+/*
+ * CPUID instruction features register
+ */
+#define        CPUID_FPU       0x00000001
+#define        CPUID_VME       0x00000002
+#define        CPUID_DE        0x00000004
+#define        CPUID_PSE       0x00000008
+#define        CPUID_TSC       0x00000010
+#define        CPUID_MSR       0x00000020
+#define        CPUID_PAE       0x00000040
+#define        CPUID_MCE       0x00000080
+#define        CPUID_CX8       0x00000100
+#define        CPUID_APIC      0x00000200
+#define        CPUID_B10       0x00000400
+#define        CPUID_SEP       0x00000800
+#define        CPUID_MTRR      0x00001000
+#define        CPUID_PGE       0x00002000
+#define        CPUID_MCA       0x00004000
+#define        CPUID_CMOV      0x00008000
+#define        CPUID_PAT       0x00010000
+#define        CPUID_PSE36     0x00020000
+#define        CPUID_PSN       0x00040000
+#define        CPUID_CLFSH     0x00080000
+#define        CPUID_B20       0x00100000
+#define        CPUID_DS        0x00200000
+#define        CPUID_ACPI      0x00400000
+#define        CPUID_MMX       0x00800000
+#define        CPUID_FXSR      0x01000000
+#define        CPUID_SSE       0x02000000
+#define        CPUID_XMM       0x02000000
+#define        CPUID_SSE2      0x04000000
+#define        CPUID_SS        0x08000000
+#define        CPUID_HTT       0x10000000
+#define        CPUID_TM        0x20000000
+#define        CPUID_IA64      0x40000000
+#define        CPUID_PBE       0x80000000
+
+#define        CPUID2_SSE3     0x00000001
+#define        CPUID2_MON      0x00000008
+#define        CPUID2_DS_CPL   0x00000010
+#define        CPUID2_VMX      0x00000020
+#define        CPUID2_SMX      0x00000040
+#define        CPUID2_EST      0x00000080
+#define        CPUID2_TM2      0x00000100
+#define        CPUID2_SSSE3    0x00000200
+#define        CPUID2_CNXTID   0x00000400
+#define        CPUID2_CX16     0x00002000
+#define        CPUID2_XTPR     0x00004000
+#define        CPUID2_PDCM     0x00008000
+#define        CPUID2_DCA      0x00040000
+
+/*
+ * Important bits in the AMD extended cpuid flags
+ */
+#define        AMDID_SYSCALL   0x00000800
+#define        AMDID_MP        0x00080000
+#define        AMDID_NX        0x00100000
+#define        AMDID_EXT_MMX   0x00400000
+#define        AMDID_FFXSR     0x01000000
+#define        AMDID_RDTSCP    0x08000000
+#define        AMDID_LM        0x20000000
+#define        AMDID_EXT_3DNOW 0x40000000
+#define        AMDID_3DNOW     0x80000000
+
+#define        AMDID2_LAHF     0x00000001
+#define        AMDID2_CMP      0x00000002
+#define        AMDID2_SVM      0x00000004
+#define        AMDID2_EXT_APIC 0x00000008
+#define        AMDID2_CR8      0x00000010
+#define        AMDID2_PREFETCH 0x00000100
+
+/*
+ * CPUID instruction 1 ebx info
+ */
+#define        CPUID_BRAND_INDEX       0x000000ff
+#define        CPUID_CLFUSH_SIZE       0x0000ff00
+#define        CPUID_HTT_CORES         0x00ff0000
+#define        CPUID_LOCAL_APIC_ID     0xff000000
+
+/*
+ * AMD extended function 8000_0008h ecx info
+ */
+#define        AMDID_CMP_CORES         0x000000ff
+
+/*
+ * Model-specific registers for the i386 family
+ */
+#define        MSR_P5_MC_ADDR          0x000
+#define        MSR_P5_MC_TYPE          0x001
+#define        MSR_TSC                 0x010
+#define        MSR_P5_CESR             0x011
+#define        MSR_P5_CTR0             0x012
+#define        MSR_P5_CTR1             0x013
+#define        MSR_IA32_PLATFORM_ID    0x017
+#define        MSR_APICBASE            0x01b
+#define        MSR_EBL_CR_POWERON      0x02a
+#define        MSR_TEST_CTL            0x033
+#define        MSR_BIOS_UPDT_TRIG      0x079
+#define        MSR_BBL_CR_D0           0x088
+#define        MSR_BBL_CR_D1           0x089
+#define        MSR_BBL_CR_D2           0x08a
+#define        MSR_BIOS_SIGN           0x08b
+#define        MSR_PERFCTR0            0x0c1
+#define        MSR_PERFCTR1            0x0c2
+#define        MSR_MTRRcap             0x0fe
+#define        MSR_BBL_CR_ADDR         0x116
+#define        MSR_BBL_CR_DECC         0x118
+#define        MSR_BBL_CR_CTL          0x119
+#define        MSR_BBL_CR_TRIG         0x11a
+#define        MSR_BBL_CR_BUSY         0x11b
+#define        MSR_BBL_CR_CTL3         0x11e
+#define        MSR_SYSENTER_CS_MSR     0x174
+#define        MSR_SYSENTER_ESP_MSR    0x175
+#define        MSR_SYSENTER_EIP_MSR    0x176
+#define        MSR_MCG_CAP             0x179
+#define        MSR_MCG_STATUS          0x17a
+#define        MSR_MCG_CTL             0x17b
+#define        MSR_EVNTSEL0            0x186
+#define        MSR_EVNTSEL1            0x187
+#define        MSR_THERM_CONTROL       0x19a
+#define        MSR_THERM_INTERRUPT     0x19b
+#define        MSR_THERM_STATUS        0x19c
+#define        MSR_IA32_MISC_ENABLE    0x1a0
+#define        MSR_DEBUGCTLMSR         0x1d9
+#define        MSR_LASTBRANCHFROMIP    0x1db
+#define        MSR_LASTBRANCHTOIP      0x1dc
+#define        MSR_LASTINTFROMIP       0x1dd
+#define        MSR_LASTINTTOIP         0x1de
+#define        MSR_ROB_CR_BKUPTMPDR6   0x1e0
+#define        MSR_MTRRVarBase         0x200
+#define        MSR_MTRR64kBase         0x250
+#define        MSR_MTRR16kBase         0x258
+#define        MSR_MTRR4kBase          0x268
+#define        MSR_PAT                 0x277
+#define        MSR_MTRRdefType         0x2ff
+#define        MSR_MC0_CTL             0x400
+#define        MSR_MC0_STATUS          0x401
+#define        MSR_MC0_ADDR            0x402
+#define        MSR_MC0_MISC            0x403
+#define        MSR_MC1_CTL             0x404
+#define        MSR_MC1_STATUS          0x405
+#define        MSR_MC1_ADDR            0x406
+#define        MSR_MC1_MISC            0x407
+#define        MSR_MC2_CTL             0x408
+#define        MSR_MC2_STATUS          0x409
+#define        MSR_MC2_ADDR            0x40a
+#define        MSR_MC2_MISC            0x40b
+#define        MSR_MC3_CTL             0x40c
+#define        MSR_MC3_STATUS          0x40d
+#define        MSR_MC3_ADDR            0x40e
+#define        MSR_MC3_MISC            0x40f
+#define        MSR_MC4_CTL             0x410
+#define        MSR_MC4_STATUS          0x411
+#define        MSR_MC4_ADDR            0x412
+#define        MSR_MC4_MISC            0x413
+
+/*
+ * Constants related to MSR's.
+ */
+#define        APICBASE_RESERVED       0x000006ff
+#define        APICBASE_BSP            0x00000100
+#define        APICBASE_ENABLED        0x00000800
+#define        APICBASE_ADDRESS        0xfffff000
+
+/*
+ * PAT modes.
+ */
+#define        PAT_UNCACHEABLE         0x00
+#define        PAT_WRITE_COMBINING     0x01
+#define        PAT_WRITE_THROUGH       0x04
+#define        PAT_WRITE_PROTECTED     0x05
+#define        PAT_WRITE_BACK          0x06
+#define        PAT_UNCACHED            0x07
+#define        PAT_VALUE(i, m)         ((long)(m) << (8 * (i)))
+#define        PAT_MASK(i)             PAT_VALUE(i, 0xff)
+
+/*
+ * Constants related to MTRRs
+ */
+#define        MTRR_N64K               8       /* numbers of fixed-size entries */
+#define        MTRR_N16K               16
+#define        MTRR_N4K                64
+
+/* Performance Control Register (5x86 only). */
+#define        PCR0                    0x20
+#define        PCR0_RSTK               0x01    /* Enables return stack */
+#define        PCR0_BTB                0x02    /* Enables branch target buffer */
+#define        PCR0_LOOP               0x04    /* Enables loop */
+#define        PCR0_AIS                0x08    /* Enables all instructions stalled to
+                                                                  serialize pipe. */
+#define        PCR0_MLR                0x10    /* Enables reordering of misaligned loads */
+#define        PCR0_BTBRT              0x40    /* Enables BTB test register. */
+#define        PCR0_LSSER              0x80    /* Disable reorder */
+
+/* Device Identification Registers */
+#define        DIR0                    0xfe
+#define        DIR1                    0xff
+
+/*
+ * The following four 3-byte registers control the non-cacheable regions.
+ * These registers must be written as three separate bytes.
+ *
+ * NCRx+0: A31-A24 of starting address
+ * NCRx+1: A23-A16 of starting address
+ * NCRx+2: A15-A12 of starting address | NCR_SIZE_xx.
+ *
+ * The non-cacheable region's starting address must be aligned to the
+ * size indicated by the NCR_SIZE_xx field.
+ */
+#define        NCR1    0xc4
+#define        NCR2    0xc7
+#define        NCR3    0xca
+#define        NCR4    0xcd
+
+#define        NCR_SIZE_0K     0
+#define        NCR_SIZE_4K     1
+#define        NCR_SIZE_8K     2
+#define        NCR_SIZE_16K    3
+#define        NCR_SIZE_32K    4
+#define        NCR_SIZE_64K    5
+#define        NCR_SIZE_128K   6
+#define        NCR_SIZE_256K   7
+#define        NCR_SIZE_512K   8
+#define        NCR_SIZE_1M     9
+#define        NCR_SIZE_2M     10
+#define        NCR_SIZE_4M     11
+#define        NCR_SIZE_8M     12
+#define        NCR_SIZE_16M    13
+#define        NCR_SIZE_32M    14
+#define        NCR_SIZE_4G     15
+
+/*
+ * The address region registers are used to specify the location and
+ * size for the eight address regions.
+ *
+ * ARRx + 0: A31-A24 of start address
+ * ARRx + 1: A23-A16 of start address
+ * ARRx + 2: A15-A12 of start address | ARR_SIZE_xx
+ */
+#define        ARR0    0xc4
+#define        ARR1    0xc7
+#define        ARR2    0xca
+#define        ARR3    0xcd
+#define        ARR4    0xd0
+#define        ARR5    0xd3
+#define        ARR6    0xd6
+#define        ARR7    0xd9
+
+#define        ARR_SIZE_0K             0
+#define        ARR_SIZE_4K             1
+#define        ARR_SIZE_8K             2
+#define        ARR_SIZE_16K    3
+#define        ARR_SIZE_32K    4
+#define        ARR_SIZE_64K    5
+#define        ARR_SIZE_128K   6
+#define        ARR_SIZE_256K   7
+#define        ARR_SIZE_512K   8
+#define        ARR_SIZE_1M             9
+#define        ARR_SIZE_2M             10
+#define        ARR_SIZE_4M             11
+#define        ARR_SIZE_8M             12
+#define        ARR_SIZE_16M    13
+#define        ARR_SIZE_32M    14
+#define        ARR_SIZE_4G             15
+
+/*
+ * The region control registers specify the attributes associated with
+ * the ARRx address regions.
+ */
+#define        RCR0    0xdc
+#define        RCR1    0xdd
+#define        RCR2    0xde
+#define        RCR3    0xdf
+#define        RCR4    0xe0
+#define        RCR5    0xe1
+#define        RCR6    0xe2
+#define        RCR7    0xe3
+
+#define        RCR_RCD 0x01    /* Disables caching for ARRx (x = 0-6). */
+#define        RCR_RCE 0x01    /* Enables caching for ARR7. */
+#define        RCR_WWO 0x02    /* Weak write ordering. */
+#define        RCR_WL  0x04    /* Weak locking. */
+#define        RCR_WG  0x08    /* Write gathering. */
+#define        RCR_WT  0x10    /* Write-through. */
+#define        RCR_NLB 0x20    /* LBA# pin is not asserted. */
+
+/* AMD Write Allocate Top-Of-Memory and Control Register */
+#define        AMD_WT_ALLOC_TME        0x40000 /* top-of-memory enable */
+#define        AMD_WT_ALLOC_PRE        0x20000 /* programmable range enable */
+#define        AMD_WT_ALLOC_FRE        0x10000 /* fixed (A0000-FFFFF) range enable */
+
+/* AMD64 MSR's */
+#define        MSR_EFER        0xc0000080      /* extended features */
+#define        MSR_STAR        0xc0000081      /* legacy mode SYSCALL target/cs/ss */
+#define        MSR_LSTAR       0xc0000082      /* long mode SYSCALL target rip */
+#define        MSR_CSTAR       0xc0000083      /* compat mode SYSCALL target rip */
+#define        MSR_SF_MASK     0xc0000084      /* syscall flags mask */
+#define        MSR_FSBASE      0xc0000100      /* base address of the %fs "segment" */
+#define        MSR_GSBASE      0xc0000101      /* base address of the %gs "segment" */
+#define        MSR_KGSBASE     0xc0000102      /* base address of the kernel %gs */
+#define        MSR_PERFEVSEL0  0xc0010000
+#define        MSR_PERFEVSEL1  0xc0010001
+#define        MSR_PERFEVSEL2  0xc0010002
+#define        MSR_PERFEVSEL3  0xc0010003
+#undef MSR_PERFCTR0
+#undef MSR_PERFCTR1
+#define        MSR_PERFCTR0    0xc0010004
+#define        MSR_PERFCTR1    0xc0010005
+#define        MSR_PERFCTR2    0xc0010006
+#define        MSR_PERFCTR3    0xc0010007
+#define        MSR_SYSCFG      0xc0010010
+#define        MSR_IORRBASE0   0xc0010016
+#define        MSR_IORRMASK0   0xc0010017
+#define        MSR_IORRBASE1   0xc0010018
+#define        MSR_IORRMASK1   0xc0010019
+#define        MSR_TOP_MEM     0xc001001a      /* boundary for ram below 4G */
+#define        MSR_TOP_MEM2    0xc001001d      /* boundary for ram above 4G */
+
+#endif /* !_CPU_SPECIALREG_H_ */
index 0d3ec43..689ce3c 100644 (file)
@@ -35,7 +35,7 @@
  *
  *     from: @(#)trap.h        5.4 (Berkeley) 5/9/91
  * $FreeBSD: src/sys/amd64/include/trap.h,v 1.13 2001/07/12 06:32:51 peter Exp $
- * $DragonFly: src/sys/cpu/amd64/include/trap.h,v 1.1 2007/08/21 19:40:24 corecode Exp $
+ * $DragonFly: src/sys/cpu/amd64/include/trap.h,v 1.2 2007/09/23 04:29:30 yanyh Exp $
  */
 
 #ifndef _CPU_TRAP_H_
@@ -49,6 +49,7 @@
 #define        T_PRIVINFLT     1       /* privileged instruction */
 #define        T_BPTFLT        3       /* breakpoint instruction */
 #define        T_ARITHTRAP     6       /* arithmetic trap */
+#define T_ASTFLT       7       /* system forced exception */
 #define        T_PROTFLT       9       /* protection fault */
 #define        T_TRCTRAP       10      /* debug exception (sic) */
 #define        T_PAGEFLT       12      /* page fault */
 #define            BUS_SEGM_FAULT      T_RESERVED      /* segment protection base */
 
 /* Trap's coming from user mode */
+#define T_SYSCALL80    0x080
 #define        T_USER  0x100
 
 #endif /* !_CPU_TRAP_H_ */
index 0bf2ad6..a4e3e18 100644 (file)
@@ -31,7 +31,7 @@
  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  * 
- * $DragonFly: src/sys/cpu/amd64/include/vframe.h,v 1.1 2007/08/21 19:40:24 corecode Exp $
+ * $DragonFly: src/sys/cpu/amd64/include/vframe.h,v 1.2 2007/09/23 04:29:30 yanyh Exp $
  */
 
 #ifndef _CPU_VFRAME_H_
@@ -50,7 +50,7 @@
  */
 struct vextframe {
        /* XXX come back for fixing this in segments.h */
-       /* struct savetls vx_tls; */
+       struct savetls vx_tls;
 };
 
 #endif
index bed16a1..355d8fd 100644 (file)
@@ -1,10 +1,9 @@
 # Used by the device build to check for device support
 #
-# $DragonFly: src/sys/platform/pc64/Makefile.inc,v 1.1 2007/08/21 19:45:45 corecode Exp $
+# $DragonFly: src/sys/platform/pc64/Makefile.inc,v 1.2 2007/09/23 04:29:31 yanyh Exp $
 
-DEV_SUPPORT=   acpica5 agp bridge crypto disk drm est misc netif \
-               pccard powermng raid serial sound usbmisc video
+DEV_SUPPORT=
 
-SYSCONS_APM_SUPPORT=1
-BOOT0CFG_SUPPORT=1
+SYSCONS_APM_SUPPORT=1
+BOOT0CFG_SUPPORT=1
 
diff --git a/sys/platform/pc64/amd64/atomic.c b/sys/platform/pc64/amd64/atomic.c
new file mode 100644 (file)
index 0000000..4bd3b35
--- /dev/null
@@ -0,0 +1,49 @@
+/*-
+ * Copyright (c) 1999 Peter Jeremy
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: src/sys/i386/i386/atomic.c,v 1.3 1999/08/28 00:43:40 peter Exp $
+ * $DragonFly: src/sys/platform/pc64/amd64/atomic.c,v 1.1 2007/09/23 04:29:31 yanyh Exp $
+ */
+
+/* This file creates publicly callable functions to perform various
+ * simple arithmetic on memory which is atomic in the presence of
+ * interrupts and multiple processors.
+ */
+#include <sys/types.h>
+
+/* Firstly make atomic.h generate prototypes as it will for kernel modules */
+#define KLD_MODULE
+#include <machine/atomic.h>
+#undef _MACHINE_ATOMIC_H_      /* forget we included it */
+#undef _CPU_ATOMIC_H_          /* forget we included it */
+#undef KLD_MODULE
+#undef ATOMIC_ASM
+
+/* Make atomic.h generate public functions */
+#define static
+#undef __inline
+#define __inline
+
+#include <machine/atomic.h>
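A hedged illustration of what the two passes over <machine/atomic.h> achieve (the atomic_add_int signature shown is the conventional one and is assumed, not taken from this commit):

    /*
     * First include (KLD_MODULE defined): only prototypes are emitted, e.g.
     *
     *      void    atomic_add_int(volatile u_int *p, u_int v);
     *
     * Second include (`static' and `__inline' defined away): the same inline
     * definitions expand into ordinary external functions with those names,
     * so modules built with KLD_MODULE can link against this object file.
     */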
diff --git a/sys/platform/pc64/amd64/autoconf.c b/sys/platform/pc64/amd64/autoconf.c
new file mode 100644 (file)
index 0000000..935f58f
--- /dev/null
@@ -0,0 +1,540 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *     This product includes software developed by the University of
+ *     California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *     from: @(#)autoconf.c    7.1 (Berkeley) 5/9/91
+ * $FreeBSD: src/sys/i386/i386/autoconf.c,v 1.146.2.2 2001/06/07 06:05:58 dd Exp $
+ * $DragonFly: src/sys/platform/pc64/amd64/autoconf.c,v 1.1 2007/09/23 04:29:31 yanyh Exp $
+ */
+
+/*
+ * Setup the system to run on the current machine.
+ *
+ * Configure() is called at boot time and initializes the vba
+ * device tables and the memory controller monitoring.  Available
+ * devices are determined (from possibilities mentioned in ioconf.c),
+ * and the drivers are initialized.
+ */
+#include "opt_bootp.h"
+#include "opt_ffs.h"
+#include "opt_cd9660.h"
+#include "opt_nfs.h"
+#include "opt_nfsroot.h"
+#include "opt_bus.h"
+#include "opt_rootdevname.h"
+
+#include "use_isa.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bootmaj.h>
+#include <sys/bus.h>
+#include <sys/buf.h>
+#include <sys/conf.h>
+#include <sys/diskslice.h>
+#include <sys/reboot.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/mount.h>
+#include <sys/cons.h>
+#include <sys/thread.h>
+#include <sys/device.h>
+#include <sys/machintr.h>
+
+#include <vm/pmap.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_pager.h>
+
+#if 0
+#include <machine/pcb.h>
+#include <machine/pcb_ext.h>
+#include <machine/vm86.h>
+#endif
+#include <machine/smp.h>
+#include <machine/globaldata.h>
+#include <machine/md_var.h>
+
+#if NISA > 0
+#include <bus/isa/isavar.h>
+
+device_t isa_bus_device = 0;
+#endif
+
+static void cpu_startup (void *);
+static void configure_first (void *);
+static void configure (void *);
+static void configure_final (void *);
+
+#if defined(FFS) && defined(FFS_ROOT)
+static void    setroot (void);
+#endif
+
+#if defined(NFS) && defined(NFS_ROOT)
+#if !defined(BOOTP_NFSROOT)
+static void    pxe_setup_nfsdiskless(void);
+#endif
+#endif
+
+SYSINIT(cpu, SI_BOOT2_SMP, SI_ORDER_FIRST, cpu_startup, NULL);
+SYSINIT(configure1, SI_SUB_CONFIGURE, SI_ORDER_FIRST, configure_first, NULL);
+/* SI_ORDER_SECOND is hookable */
+SYSINIT(configure2, SI_SUB_CONFIGURE, SI_ORDER_THIRD, configure, NULL);
+/* SI_ORDER_MIDDLE is hookable */
+SYSINIT(configure3, SI_SUB_CONFIGURE, SI_ORDER_ANY, configure_final, NULL);
+
+cdev_t rootdev = NULL;
+cdev_t dumpdev = NULL;
+
+/*
+ * Machine-dependent startup: size and allocate the buffer cache and
+ * start any additional cpus.
+ */
+static void
+cpu_startup(void *dummy)
+{
+       vm_offset_t buffer_sva;
+       vm_offset_t buffer_eva;
+       vm_offset_t pager_sva;
+       vm_offset_t pager_eva;
+
+       kprintf("%s", version);
+       kprintf("real memory = %llu (%lluK bytes)\n",
+               ptoa(Maxmem), ptoa(Maxmem) / 1024);
+
+       if (nbuf == 0) {
+               int factor = 4 * BKVASIZE / 1024;
+               int kbytes = Maxmem * (PAGE_SIZE / 1024);
+
+               nbuf = 50;
+               if (kbytes > 4096)
+                       nbuf += min((kbytes - 4096) / factor, 65536 / factor);
+               if (kbytes > 65536)
+                       nbuf += (kbytes - 65536) * 2 / (factor * 5);
+               if (maxbcache && nbuf > maxbcache / BKVASIZE)
+                       nbuf = maxbcache / BKVASIZE;
+       }
+       if (nbuf > (virtual_end - virtual_start) / (BKVASIZE * 2)) {
+               nbuf = (virtual_end - virtual_start) / (BKVASIZE * 2);
+               kprintf("Warning: nbufs capped at %d\n", nbuf);
+       }
+
+       nswbuf = max(min(nbuf/4, 256), 16);
+#ifdef NSWBUF_MIN
+       if (nswbuf < NSWBUF_MIN)
+               nswbuf = NSWBUF_MIN;
+#endif
+
+       /*
+        * Allocate memory for the buffer cache
+        */
+       buf = (void *)kmem_alloc(&kernel_map, nbuf * sizeof(struct buf));
+       swbuf = (void *)kmem_alloc(&kernel_map, nswbuf * sizeof(struct buf));
+
+
+#ifdef DIRECTIO
+        ffs_rawread_setup();
+#endif
+       kmem_suballoc(&kernel_map, &clean_map, &clean_sva, &clean_eva,
+                     (nbuf*BKVASIZE) + (nswbuf*MAXPHYS) + pager_map_size);
+       kmem_suballoc(&clean_map, &buffer_map, &buffer_sva, &buffer_eva,
+                     (nbuf*BKVASIZE));
+       buffer_map.system_map = 1;
+       kmem_suballoc(&clean_map, &pager_map, &pager_sva, &pager_eva,
+                     (nswbuf*MAXPHYS) + pager_map_size);
+       pager_map.system_map = 1;
+#if defined(USERCONFIG)
+        userconfig();
+       cninit();               /* the preferred console may have changed */
+#endif
+       kprintf("avail memory = %u (%uK bytes)\n", ptoa(vmstats.v_free_count),
+               ptoa(vmstats.v_free_count) / 1024);
+       bufinit();
+       vm_pager_bufferinit();
+#ifdef SMP
+       mp_start();
+       mp_announce();
+#endif
+       cpu_setregs();
+}
+
+/*
+ * Determine i/o configuration for a machine.
+ */
+static void
+configure_first(void *dummy)
+{
+}
+
+static void
+configure(void *dummy)
+{
+       /*
+        * Final interrupt support activation, then enable hardware interrupts.
+        */
+       MachIntrABI.finalize();
+       cpu_enable_intr();
+
+       /*
+        * This will configure all devices, generally starting with the
+        * nexus (i386/i386/nexus.c).  The nexus ISA code explicitly
+        * dummies up the attach in order to delay legacy initialization
+        * until after all other busses/subsystems have had a chance
+        * at those resources.
+        */
+       root_bus_configure();
+
+#if NISA > 0
+       /*
+        * Explicitly probe and attach ISA last.  The isa bus saves
+        * its device node at attach time for us here.
+        */
+       if (isa_bus_device)
+               isa_probe_children(isa_bus_device);
+#endif
+
+       /*
+        * Allow lowering of the ipl to the lowest kernel level if we
+        * panic (or call tsleep() before clearing `cold').  No level is
+        * completely safe (since a panic may occur in a critical region
+        * at splhigh()), but we want at least bio interrupts to work.
+        */
+       safepri = TDPRI_KERN_USER;
+}
+
+static void
+configure_final(void *dummy)
+{
+       cninit_finish();
+
+       if (bootverbose)
+               kprintf("Device configuration finished.\n");
+}
+
+#ifdef BOOTP
+void bootpc_init(void);
+#endif
+/*
+ * Do legacy root filesystem discovery.
+ */
+void
+cpu_rootconf(void)
+{
+#ifdef BOOTP
+        bootpc_init();
+#endif
+#if defined(NFS) && defined(NFS_ROOT)
+#if !defined(BOOTP_NFSROOT)
+       pxe_setup_nfsdiskless();
+       if (nfs_diskless_valid)
+#endif
+               rootdevnames[0] = "nfs:";
+#endif
+#if defined(FFS) && defined(FFS_ROOT)
+        if (!rootdevnames[0])
+                setroot();
+#endif
+}
+SYSINIT(cpu_rootconf, SI_SUB_ROOT_CONF, SI_ORDER_FIRST, cpu_rootconf, NULL)
+
+u_long bootdev = 0;            /* not a cdev_t - encoding is different */
+
+#if defined(FFS) && defined(FFS_ROOT)
+
+/*
+ * The boot code uses old block device major numbers to pass bootdev to
+ * us.  We have to translate these to character device majors because
+ * we don't have block devices any more.
+ */
+static int
+boot_translate_majdev(int bmajor)
+{
+       static int conv[] = { BOOTMAJOR_CONVARY };
+
+       if (bmajor >= 0 && bmajor < sizeof(conv)/sizeof(conv[0]))
+               return(conv[bmajor]);
+       return(-1);
+}
+
+/*
+ * Attempt to find the device from which we were booted.
+ * If we can do so, and not instructed not to do so,
+ * set rootdevs[] and rootdevnames[] to correspond to the
+ * boot device(s).
+ *
+ * This code survives in order to allow the system to be
+ * booted from legacy environments that do not correctly
+ * populate the kernel environment.  There are significant
+ * restrictions on the bootability of the system in this
+ * situation; it can only mount root from a 'da', 'wd', or
+ * 'fd' device, and the root filesystem must be ufs.
+ */
+static void
+setroot(void)
+{
+       int majdev, mindev, unit, slice, part;
+       cdev_t newrootdev, dev;
+       char partname[2];
+       char *sname;
+
+       if ((bootdev & B_MAGICMASK) != B_DEVMAGIC) {
+               kprintf("no B_DEVMAGIC (bootdev=%#lx)\n", bootdev);
+               return;
+       }
+       majdev = boot_translate_majdev(B_TYPE(bootdev));
+       if (bootverbose) {
+               kprintf("bootdev: %08lx type=%ld unit=%ld "
+                       "slice=%ld part=%ld major=%d\n",
+                       bootdev, B_TYPE(bootdev), B_UNIT(bootdev),
+                       B_SLICE(bootdev), B_PARTITION(bootdev), majdev);
+       }
+       dev = udev2dev(makeudev(majdev, 0), 0);
+       if (!dev_is_good(dev))
+               return;
+       unit = B_UNIT(bootdev);
+       slice = B_SLICE(bootdev);
+       if (slice == WHOLE_DISK_SLICE)
+               slice = COMPATIBILITY_SLICE;
+       if (slice < 0 || slice >= MAX_SLICES) {
+               kprintf("bad slice\n");
+               return;
+       }
+
+       part = B_PARTITION(bootdev);
+       mindev = dkmakeminor(unit, slice, part);
+       newrootdev = udev2dev(makeudev(majdev, mindev), 0);
+       if (!dev_is_good(newrootdev))
+               return;
+       sname = dsname(newrootdev, unit, slice, part, partname);
+       rootdevnames[0] = kmalloc(strlen(sname) + 6, M_DEVBUF, M_WAITOK);
+       ksprintf(rootdevnames[0], "ufs:%s%s", sname, partname);
+
+       /*
+        * For properly dangerously dedicated disks (ones with a historical
+        * bogus partition table), the boot blocks will give slice = 4, but
+        * the kernel will only provide the compatibility slice since it
+        * knows that slice 4 is not a real slice.  Arrange to try mounting
+        * the compatibility slice as root if mounting the slice passed by
+        * the boot blocks fails.  This handles the dangerously dedicated
+        * case and perhaps others.
+        */
+       if (slice == COMPATIBILITY_SLICE)
+               return;
+       slice = COMPATIBILITY_SLICE;
+       sname = dsname(newrootdev, unit, slice, part, partname);
+       rootdevnames[1] = kmalloc(strlen(sname) + 6, M_DEVBUF, M_WAITOK);
+       ksprintf(rootdevnames[1], "ufs:%s%s", sname, partname);
+}
+#endif
+
+#if defined(NFS) && defined(NFS_ROOT)
+#if !defined(BOOTP_NFSROOT)
+
+#include <sys/socket.h>
+#include <net/if.h>
+#include <net/if_dl.h>
+#include <net/if_types.h>
+#include <net/if_var.h>
+#include <net/ethernet.h>
+#include <netinet/in.h>
+#include <vfs/nfs/rpcv2.h>
+#include <vfs/nfs/nfsproto.h>
+#include <vfs/nfs/nfs.h>
+#include <vfs/nfs/nfsdiskless.h>
+
+extern struct nfs_diskless     nfs_diskless;
+
+/*
+ * Convert a kenv variable to a sockaddr.  If the kenv variable does not
+ * exist the sockaddr will remain zeroed out (callers typically just check
+ * sin_len).  A network address of 0.0.0.0 is equivalent to failure.
+ */
+static int
+inaddr_to_sockaddr(char *ev, struct sockaddr_in *sa)
+{
+       u_int32_t       a[4];
+       char            *cp;
+
+       bzero(sa, sizeof(*sa));
+
+       if ((cp = kgetenv(ev)) == NULL)
+               return(1);
+       if (ksscanf(cp, "%d.%d.%d.%d", &a[0], &a[1], &a[2], &a[3]) != 4)
+               return(1);
+       if (a[0] == 0 && a[1] == 0 && a[2] == 0 && a[3] == 0)
+               return(1);
+       /* XXX is this ordering correct? */
+       sa->sin_addr.s_addr = (a[3] << 24) + (a[2] << 16) + (a[1] << 8) + a[0];
+       sa->sin_len = sizeof(*sa);
+       sa->sin_family = AF_INET;
+       return(0);
+}
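+
+/*
+ * Illustrative example (editor's sketch, hypothetical value): for
+ * boot.netif.ip=10.0.0.5 the scan above yields a[0]=10, a[1]=0, a[2]=0,
+ * a[3]=5, so s_addr is assembled as 0x0500000a; stored on a little-endian
+ * machine such as amd64 that is the byte sequence 0a 00 00 05, i.e.
+ * 10.0.0.5 in network byte order.
+ */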
+
+static int
+hwaddr_to_sockaddr(char *ev, struct sockaddr_dl *sa)
+{
+       char            *cp;
+       u_int32_t       a[6];
+
+       bzero(sa, sizeof(*sa));
+       sa->sdl_len = sizeof(*sa);
+       sa->sdl_family = AF_LINK;
+       sa->sdl_type = IFT_ETHER;
+       sa->sdl_alen = ETHER_ADDR_LEN;
+       if ((cp = kgetenv(ev)) == NULL)
+               return(1);
+       if (ksscanf(cp, "%x:%x:%x:%x:%x:%x", &a[0], &a[1], &a[2], &a[3], &a[4], &a[5]) != 6)
+               return(1);
+       sa->sdl_data[0] = a[0];
+       sa->sdl_data[1] = a[1];
+       sa->sdl_data[2] = a[2];
+       sa->sdl_data[3] = a[3];
+       sa->sdl_data[4] = a[4];
+       sa->sdl_data[5] = a[5];
+       return(0);
+}
+
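+/*
+ * Editor's note (derived from the code below, not part of the original
+ * change): decode_nfshandle() parses a file handle exported by the loader
+ * as an ASCII string of the form "X<hex-byte-pairs>X"; for example a
+ * hypothetical "X0a1b2cX" decodes to the three bytes 0x0a 0x1b 0x2c.  The
+ * decoded length is returned, or 0 for a malformed or oversized handle.
+ */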
+static int
+decode_nfshandle(char *ev, u_char *fh) 
+{
+       u_char  *cp;
+       int     len, val;
+
+       if (((cp = kgetenv(ev)) == NULL) || (strlen(cp) < 2) || (*cp != 'X'))
+               return(0);
+       len = 0;
+       cp++;
+       for (;;) {
+               if (*cp == 'X')
+                       return(len);
+               if ((ksscanf(cp, "%2x", &val) != 1) || (val > 0xff))
+                       return(0);
+               *(fh++) = val;
+               len++;
+               cp += 2;
+               if (len > NFSX_V2FH)
+                   return(0);
+       }
+}
+
+/*
+ * Populate the essential fields in the nfsv3_diskless structure.
+ *
+ * The loader is expected to export the following environment variables:
+ *
+ * boot.netif.ip               IP address on boot interface
+ * boot.netif.netmask          netmask on boot interface
+ * boot.netif.gateway          default gateway (optional)
+ * boot.netif.hwaddr           hardware address of boot interface
+ * boot.nfsroot.server         IP address of root filesystem server
+ * boot.nfsroot.path           path of the root filesystem on server
+ * boot.nfsroot.nfshandle      NFS handle for root filesystem on server
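+ *
+ * Illustrative values only (editor's example, not exported by this change
+ * itself); a PXE environment might set something like:
+ *
+ *     boot.netif.ip=10.0.0.5
+ *     boot.netif.netmask=255.255.255.0
+ *     boot.netif.gateway=10.0.0.1
+ *     boot.netif.hwaddr=00:11:22:33:44:55
+ *     boot.nfsroot.server=10.0.0.1
+ *     boot.nfsroot.path=/tftpboot/root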
+ */
+static void
+pxe_setup_nfsdiskless(void)
+{
+       struct nfs_diskless     *nd = &nfs_diskless;
+       struct ifnet            *ifp;
+       struct ifaddr           *ifa;
+       struct sockaddr_dl      *sdl, ourdl;
+       struct sockaddr_in      myaddr, netmask;
+       char                    *cp;
+
+       /* set up interface */
+       if (inaddr_to_sockaddr("boot.netif.ip", &myaddr))
+               return;
+       if (inaddr_to_sockaddr("boot.netif.netmask", &netmask)) {
+               kprintf("PXE: no netmask\n");
+               return;
+       }
+       bcopy(&myaddr, &nd->myif.ifra_addr, sizeof(myaddr));
+       bcopy(&myaddr, &nd->myif.ifra_broadaddr, sizeof(myaddr));
+       ((struct sockaddr_in *) &nd->myif.ifra_broadaddr)->sin_addr.s_addr =
+               myaddr.sin_addr.s_addr | ~ netmask.sin_addr.s_addr;
+       bcopy(&netmask, &nd->myif.ifra_mask, sizeof(netmask));
+
+       if (hwaddr_to_sockaddr("boot.netif.hwaddr", &ourdl)) {
+               kprintf("PXE: no hardware address\n");
+               return;
+       }
+       ifa = NULL;
+       ifp = TAILQ_FIRST(&ifnet);
+       TAILQ_FOREACH(ifp, &ifnet, if_link) {
+               TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+                       if ((ifa->ifa_addr->sa_family == AF_LINK) &&
+                           (sdl = ((struct sockaddr_dl *)ifa->ifa_addr))) {
+                               if ((sdl->sdl_type == ourdl.sdl_type) &&
+                                   (sdl->sdl_alen == ourdl.sdl_alen) &&
+                                   !bcmp(sdl->sdl_data + sdl->sdl_nlen,
+                                         ourdl.sdl_data + ourdl.sdl_nlen, 
+                                         sdl->sdl_alen))
+                                   goto match_done;
+                       }
+               }
+       }
+       kprintf("PXE: no interface\n");
+       return; /* no matching interface */
+match_done:
+       strlcpy(nd->myif.ifra_name, ifp->if_xname, sizeof(nd->myif.ifra_name));
+       
+       /* set up gateway */
+       inaddr_to_sockaddr("boot.netif.gateway", &nd->mygateway);
+
+       /* XXX set up swap? */
+
+       /* set up root mount */
+       nd->root_args.rsize = 8192;             /* XXX tunable? */
+       nd->root_args.wsize = 8192;
+       nd->root_args.sotype = SOCK_DGRAM;
+       nd->root_args.flags = (NFSMNT_WSIZE | NFSMNT_RSIZE | NFSMNT_RESVPORT);
+       if (inaddr_to_sockaddr("boot.nfsroot.server", &nd->root_saddr)) {
+               kprintf("PXE: no server\n");
+               return;
+       }
+       nd->root_saddr.sin_port = htons(NFS_PORT);
+
+       /*
+        * A tftp-only loader may pass NFS path information without a 
+        * root handle.  Generate a warning but continue configuring.
+        */
+       if (decode_nfshandle("boot.nfsroot.nfshandle", &nd->root_fh[0]) == 0) {
+               kprintf("PXE: Warning, no NFS handle passed from loader\n");
+       }
+       if ((cp = kgetenv("boot.nfsroot.path")) != NULL)
+               strncpy(nd->root_hostnam, cp, MNAMELEN - 1);
+
+       nfs_diskless_valid = 1;
+}
+
+#endif
+#endif
diff --git a/sys/platform/pc64/amd64/busdma_machdep.c b/sys/platform/pc64/amd64/busdma_machdep.c
new file mode 100644 (file)
index 0000000..fce32ed
--- /dev/null
@@ -0,0 +1,901 @@
+/*
+ * Copyright (c) 1997, 1998 Justin T. Gibbs.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: src/sys/i386/i386/busdma_machdep.c,v 1.16.2.2 2003/01/23 00:55:27 scottl Exp $
+ * $DragonFly: src/sys/platform/pc64/amd64/busdma_machdep.c,v 1.1 2007/09/23 04:29:31 yanyh Exp $
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/uio.h>
+#include <sys/thread2.h>
+#include <sys/bus_dma.h>
+
+#include <vm/vm.h>
+#include <vm/vm_page.h>
+
+/* XXX needed to access the pmap to convert per-proc virtual addresses to physical */
+#include <sys/proc.h>
+#include <sys/lock.h>
+#include <vm/vm_map.h>
+
+#include <machine/md_var.h>
+
+#define MAX_BPAGES 128
+
+struct bus_dma_tag {
+       bus_dma_tag_t     parent;
+       bus_size_t        alignment;
+       bus_size_t        boundary;
+       bus_addr_t        lowaddr;
+       bus_addr_t        highaddr;
+       bus_dma_filter_t *filter;
+       void             *filterarg;
+       bus_size_t        maxsize;
+       u_int             nsegments;
+       bus_size_t        maxsegsz;
+       int               flags;
+       int               ref_count;
+       int               map_count;
+       bus_dma_segment_t *segments;
+};
+
+struct bounce_page {
+       vm_offset_t     vaddr;          /* kva of bounce buffer */
+       bus_addr_t      busaddr;        /* Physical address */
+       vm_offset_t     datavaddr;      /* kva of client data */
+       bus_size_t      datacount;      /* client data count */
+       STAILQ_ENTRY(bounce_page) links;
+};
+
+int busdma_swi_pending;
+
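+/*
+ * Editor's note (describing the statics below, not part of the original
+ * change): bounce-page pool bookkeeping.  free_bpages + reserved_bpages +
+ * active_bpages always equals total_bpages; pages move from free to
+ * reserved in reserve_bounce_pages(), from reserved to active in
+ * add_bounce_page(), and back to free in free_bounce_page().
+ */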
+static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
+static int free_bpages;
+static int reserved_bpages;
+static int active_bpages;
+static int total_bpages;
+static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR;
+
+struct bus_dmamap {
+       struct bp_list         bpages;
+       int                    pagesneeded;
+       int                    pagesreserved;
+       bus_dma_tag_t          dmat;
+       void                  *buf;             /* unmapped buffer pointer */
+       bus_size_t             buflen;          /* unmapped buffer length */
+       bus_dmamap_callback_t *callback;
+       void                  *callback_arg;
+       STAILQ_ENTRY(bus_dmamap) links;
+};
+
+static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
+static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
+static struct bus_dmamap nobounce_dmamap;
+
+static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
+static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map);
+static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
+                                  vm_offset_t vaddr, bus_size_t size);
+static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
+static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
+
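+/*
+ * Editor's note (describing the code below, not part of the original
+ * change): run_filter() returns non-zero when the physical address falls
+ * inside a tag's excluded window (lowaddr, highaddr] and no filter function
+ * overrides the decision, walking up the parent tag chain; such addresses
+ * must be bounced.
+ */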
+static __inline int
+run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
+{
+       int retval;
+
+       retval = 0;
+       do {
+               if (paddr > dmat->lowaddr
+                && paddr <= dmat->highaddr
+                && (dmat->filter == NULL
+                 || (*dmat->filter)(dmat->filterarg, paddr) != 0))
+                       retval = 1;
+
+               dmat = dmat->parent;            
+       } while (retval == 0 && dmat != NULL);
+       return (retval);
+}
+
+#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
+/*
+ * Allocate a device specific dma_tag.
+ */
+int
+bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
+                  bus_size_t boundary, bus_addr_t lowaddr,
+                  bus_addr_t highaddr, bus_dma_filter_t *filter,
+                  void *filterarg, bus_size_t maxsize, int nsegments,
+                  bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
+{
+       bus_dma_tag_t newtag;
+       int error = 0;
+
+       /* Return a NULL tag on failure */
+       *dmat = NULL;
+
+       newtag = kmalloc(sizeof(*newtag), M_DEVBUF, M_INTWAIT);
+
+       newtag->parent = parent;
+       newtag->alignment = alignment;
+       newtag->boundary = boundary;
+       newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
+       newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1);
+       newtag->filter = filter;
+       newtag->filterarg = filterarg;
+       newtag->maxsize = maxsize;
+       newtag->nsegments = nsegments;
+       newtag->maxsegsz = maxsegsz;
+       newtag->flags = flags;
+       newtag->ref_count = 1; /* Count ourself */
+       newtag->map_count = 0;
+       newtag->segments = NULL;
+       
+       /* Take into account any restrictions imposed by our parent tag */
+       if (parent != NULL) {
+               newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
+               newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
+               /*
+                * XXX Not really correct??? Probably need to honor boundary
+                *     all the way up the inheritance chain.
+                */
+               newtag->boundary = MAX(parent->boundary, newtag->boundary);
+               if (newtag->filter == NULL) {
+                       /*
+                        * Short circuit looking at our parent directly
+                        * since we have encapsulated all of its information
+                        */
+                       newtag->filter = parent->filter;
+                       newtag->filterarg = parent->filterarg;
+                       newtag->parent = parent->parent;
+               }
+               if (newtag->parent != NULL) {
+                       parent->ref_count++;
+               }
+       }
+       
+       if (newtag->lowaddr < ptoa(Maxmem) &&
+           (flags & BUS_DMA_ALLOCNOW) != 0) {
+               /* Must bounce */
+
+               if (lowaddr > bounce_lowaddr) {
+                       /*
+                        * Go through the pool and kill any pages
+                        * that don't reside below lowaddr.
+                        */
+                       panic("bus_dma_tag_create: page reallocation "
+                             "not implemented");
+               }
+               if (ptoa(total_bpages) < maxsize) {
+                       int pages;
+
+                       pages = atop(maxsize) - total_bpages;
+
+                       /* Add pages to our bounce pool */
+                       if (alloc_bounce_pages(newtag, pages) < pages)
+                               error = ENOMEM;
+               }
+               /* Performed initial allocation */
+               newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
+       }
+       
+       if (error != 0) {
+               kfree(newtag, M_DEVBUF);
+       } else {
+               *dmat = newtag;
+       }
+       return (error);
+}
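+
+/*
+ * Editor's sketch (illustrative only, not part of this change): a driver
+ * limited to 32-bit DMA addresses might create its tag roughly as follows,
+ * passing no parent tag, byte alignment, no boundary, a 4GB lowaddr limit,
+ * no filter, and placeholder size limits:
+ *
+ *     bus_dma_tag_t tag;
+ *     int error;
+ *
+ *     error = bus_dma_tag_create(NULL, 1, 0,
+ *         BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
+ *         NULL, NULL, MAXBSIZE, 1, BUS_SPACE_MAXSIZE_32BIT,
+ *         0, &tag);
+ */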
+
+int
+bus_dma_tag_destroy(bus_dma_tag_t dmat)
+{
+       if (dmat != NULL) {
+
+               if (dmat->map_count != 0)
+                       return (EBUSY);
+
+               while (dmat != NULL) {
+                       bus_dma_tag_t parent;
+
+                       parent = dmat->parent;
+                       dmat->ref_count--;
+                       if (dmat->ref_count == 0) {
+                               if (dmat->segments != NULL)
+                                       kfree(dmat->segments, M_DEVBUF);
+                               kfree(dmat, M_DEVBUF);
+                               /*
+                                * Last reference count, so
+                                * release our reference
+                                * count on our parent.
+                                */
+                               dmat = parent;
+                       } else
+                               dmat = NULL;
+               }
+       }
+       return (0);
+}
+
+/*
+ * Allocate a handle for mapping from kva/uva/physical
+ * address space into bus device space.
+ */
+int
+bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
+{
+       int error;
+
+       error = 0;
+
+       if (dmat->segments == NULL) {
+               KKASSERT(dmat->nsegments && dmat->nsegments < 16384);
+               dmat->segments = kmalloc(sizeof(bus_dma_segment_t) * 
+                                       dmat->nsegments, M_DEVBUF, M_INTWAIT);
+       }
+
+       if (dmat->lowaddr < ptoa(Maxmem)) {
+               /* Must bounce */
+               int maxpages;
+
+               *mapp = kmalloc(sizeof(**mapp), M_DEVBUF, M_INTWAIT);
+               if (*mapp == NULL) {
+                       return (ENOMEM);
+               } else {
+                       /* Initialize the new map */
+                       bzero(*mapp, sizeof(**mapp));
+                       STAILQ_INIT(&((*mapp)->bpages));
+               }
+               /*
+                * Attempt to add pages to our pool on a per-instance
+                * basis up to a sane limit.
+                */
+               maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
+               if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
+                || (dmat->map_count > 0
+                 && total_bpages < maxpages)) {
+                       int pages;
+
+                       if (dmat->lowaddr > bounce_lowaddr) {
+                               /*
+                                * Go through the pool and kill any pages
+                                * that don't reside below lowaddr.
+                                */
+                               panic("bus_dmamap_create: page reallocation "
+                                     "not implemented");
+                       }
+                       pages = atop(dmat->maxsize);
+                       pages = MIN(maxpages - total_bpages, pages);
+                       error = alloc_bounce_pages(dmat, pages);
+
+                       if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
+                               if (error == 0)
+                                       dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
+                       } else {
+                               error = 0;
+                       }
+               }
+       } else {
+               *mapp = NULL;
+       }
+       if (error == 0)
+               dmat->map_count++;
+       return (error);
+}
+
+/*
+ * Destroy a handle for mapping from kva/uva/physical
+ * address space into bus device space.
+ */
+int
+bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
+{
+       if (map != NULL) {
+               if (STAILQ_FIRST(&map->bpages) != NULL)
+                       return (EBUSY);
+               kfree(map, M_DEVBUF);
+       }
+       dmat->map_count--;
+       return (0);
+}
+
+
+/*
+ * Allocate a piece of memory that can be efficiently mapped into
+ * bus device space based on the constraints listed in the dma tag.
+ *
+ * mapp is degenerate.  By definition this allocation should not require
+ * bounce buffers so do not allocate a dma map.
+ */
+int
+bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
+                bus_dmamap_t *mapp)
+{
+       int mflags;
+       /* If we succeed, no mapping/bouncing will be required */
+       *mapp = NULL;
+
+       if (dmat->segments == NULL) {
+               KKASSERT(dmat->nsegments < 16384);
+               dmat->segments = kmalloc(sizeof(bus_dma_segment_t) * 
+                                       dmat->nsegments, M_DEVBUF, M_INTWAIT);
+       }
+
+       if (flags & BUS_DMA_NOWAIT)
+               mflags = M_NOWAIT;
+       else
+               mflags = M_WAITOK;
+       if (flags & BUS_DMA_ZERO)
+               mflags |= M_ZERO;
+
+       if ((dmat->maxsize <= PAGE_SIZE) &&
+           dmat->lowaddr >= ptoa(Maxmem)) {
+               *vaddr = kmalloc(dmat->maxsize, M_DEVBUF, mflags);
+               /*
+                * XXX Check whether the allocation crossed a page boundary
+                * and retry with power-of-2 alignment in that case.
+                */
+               if ((((intptr_t)*vaddr) & PAGE_MASK) !=
+                   (((intptr_t)*vaddr + dmat->maxsize) & PAGE_MASK)) {
+                       size_t size;
+                       kfree(*vaddr, M_DEVBUF);
+                       /* XXX check for overflow? */
+                       for (size = 1; size <= dmat->maxsize; size <<= 1)
+                               ;
+                       *vaddr = kmalloc(size, M_DEVBUF, mflags);
+               }
+       } else {
+               /*
+                * XXX Use Contigmalloc until it is merged into this facility
+                *     and handles multi-seg allocations.  Nobody is doing
+                *     multi-seg allocations yet though.
+                */
+               *vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
+                   0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul,
+                   dmat->boundary);
+       }
+       if (*vaddr == NULL)
+               return (ENOMEM);
+       return (0);
+}
+
+/*
+ * Free a piece of memory and its associated dmamap that were allocated
+ * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
+ */
+void
+bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
+{
+       /*
+        * dmamem does not need to be bounced, so the map should be
+        * NULL
+        */
+       if (map != NULL)
+               panic("bus_dmamem_free: Invalid map freed\n");
+       if ((dmat->maxsize <= PAGE_SIZE) &&
+           dmat->lowaddr >= ptoa(Maxmem))
+               kfree(vaddr, M_DEVBUF);
+       else
+               contigfree(vaddr, dmat->maxsize, M_DEVBUF);
+}
+
+#define BUS_DMAMAP_NSEGS ((BUS_SPACE_MAXSIZE / PAGE_SIZE) + 1)
+
+/*
+ * Map the buffer buf into bus space using the dmamap map.
+ */
+int
+bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
+               bus_size_t buflen, bus_dmamap_callback_t *callback,
+               void *callback_arg, int flags)
+{
+       vm_offset_t             vaddr;
+       vm_paddr_t              paddr;
+       bus_dma_segment_t      *sg;
+       int                     seg;
+       int                     error;
+       vm_paddr_t              nextpaddr;
+
+       if (map == NULL)
+               map = &nobounce_dmamap;
+
+       error = 0;
+       /*
+        * If we are being called during a callback, pagesneeded will
+        * be non-zero, so we can avoid doing the work twice.
+        */
+       if (dmat->lowaddr < ptoa(Maxmem) &&
+           map->pagesneeded == 0) {
+               vm_offset_t     vendaddr;
+
+               /*
+                * Count the number of bounce pages
+                * needed in order to complete this transfer
+                */
+               vaddr = trunc_page((vm_offset_t)buf);
+               vendaddr = (vm_offset_t)buf + buflen;
+
+               while (vaddr < vendaddr) {
+                       paddr = pmap_kextract(vaddr);
+                       if (run_filter(dmat, paddr) != 0) {
+
+                               map->pagesneeded++;
+                       }
+                       vaddr += PAGE_SIZE;
+               }
+       }
+
+       /* Reserve Necessary Bounce Pages */
+       if (map->pagesneeded != 0) {
+               crit_enter();
+               if (reserve_bounce_pages(dmat, map) != 0) {
+
+                       /* Queue us for resources */
+                       map->dmat = dmat;
+                       map->buf = buf;
+                       map->buflen = buflen;
+                       map->callback = callback;
+                       map->callback_arg = callback_arg;
+
+                       STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
+                       crit_exit();
+
+                       return (EINPROGRESS);
+               }
+               crit_exit();
+       }
+
+       vaddr = (vm_offset_t)buf;
+       sg = dmat->segments;
+       seg = 1;
+       sg->ds_len = 0;
+
+       nextpaddr = 0;
+       do {
+               bus_size_t      size;
+
+               paddr = pmap_kextract(vaddr);
+               size = PAGE_SIZE - (paddr & PAGE_MASK);
+               if (size > buflen)
+                       size = buflen;
+
+               if (map->pagesneeded != 0 && run_filter(dmat, paddr)) {
+                       paddr = add_bounce_page(dmat, map, vaddr, size);
+               }
+
+               if (sg->ds_len == 0) {
+                       sg->ds_addr = paddr;
+                       sg->ds_len = size;
+               } else if (paddr == nextpaddr) {
+                       sg->ds_len += size;
+               } else {
+                       /* Go to the next segment */
+                       sg++;
+                       seg++;
+                       if (seg > dmat->nsegments)
+                               break;
+                       sg->ds_addr = paddr;
+                       sg->ds_len = size;
+               }
+               vaddr += size;
+               nextpaddr = paddr + size;
+               buflen -= size;
+       } while (buflen > 0);
+
+       if (buflen != 0) {
+               kprintf("bus_dmamap_load: Too many segs! buf_len = 0x%lx\n",
+                      (u_long)buflen);
+               error = EFBIG;
+       }
+
+       (*callback)(callback_arg, dmat->segments, seg, error);
+
+       return (0);
+}
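+
+/*
+ * Editor's sketch (illustrative only, not part of this change): callers
+ * supply a bus_dmamap_callback_t that records the computed segments, for
+ * example (hypothetical softc):
+ *
+ *     static void
+ *     example_dma_callback(void *arg, bus_dma_segment_t *segs, int nseg,
+ *                          int error)
+ *     {
+ *             struct example_softc *sc = arg;
+ *
+ *             if (error == 0)
+ *                     sc->phys_addr = segs[0].ds_addr;
+ *     }
+ *
+ * The caller must also be prepared for bus_dmamap_load() to return
+ * EINPROGRESS when bounce pages must first be reserved, in which case the
+ * callback is invoked later, once pages become available.
+ */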
+
+/*
+ * Utility function to load a linear buffer.  lastaddrp holds state
+ * between invocations (for multiple-buffer loads).  segp contains
+ * the starting segment on entrace, and the ending segment on exit.
+ * first indicates if this is the first invocation of this function.
+ */
+static int
+_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
+                       void *buf, bus_size_t buflen,
+                       struct thread *td,
+                       int flags,
+                       vm_offset_t *lastaddrp,
+                       int *segp,
+                       int first)
+{
+       bus_dma_segment_t *segs;
+       bus_size_t sgsize;
+       bus_addr_t curaddr, lastaddr, baddr, bmask;
+       vm_offset_t vaddr = (vm_offset_t)buf;
+       int seg;
+       pmap_t pmap;
+
+       if (td->td_proc != NULL)
+               pmap = vmspace_pmap(td->td_proc->p_vmspace);
+       else
+               pmap = NULL;
+
+       segs = dmat->segments;
+       lastaddr = *lastaddrp;
+       bmask  = ~(dmat->boundary - 1);
+
+       for (seg = *segp; buflen > 0 ; ) {
+               /*
+                * Get the physical address for this segment.
+                */
+               if (pmap)
+                       curaddr = pmap_extract(pmap, vaddr);
+               else
+                       curaddr = pmap_kextract(vaddr);
+
+               /*
+                * Compute the segment size, and adjust counts.
+                */
+               sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
+               if (buflen < sgsize)
+                       sgsize = buflen;
+
+               /*
+                * Make sure we don't cross any boundaries.
+                */
+               if (dmat->boundary > 0) {
+                       baddr = (curaddr + dmat->boundary) & bmask;
+                       if (sgsize > (baddr - curaddr))
+                               sgsize = (baddr - curaddr);
+               }
+
+               /*
+                * Insert chunk into a segment, coalescing with
+                * previous segment if possible.
+                */
+               if (first) {
+                       segs[seg].ds_addr = curaddr;
+                       segs[seg].ds_len = sgsize;
+                       first = 0;
+               } else {
+                       if (curaddr == lastaddr &&
+                           (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
+                           (dmat->boundary == 0 ||
+                            (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
+                               segs[seg].ds_len += sgsize;
+                       else {
+                               if (++seg >= dmat->nsegments)
+                                       break;
+                               segs[seg].ds_addr = curaddr;
+                               segs[seg].ds_len = sgsize;
+                       }
+               }
+
+               lastaddr = curaddr + sgsize;
+               vaddr += sgsize;
+               buflen -= sgsize;
+       }
+
+       *segp = seg;
+       *lastaddrp = lastaddr;
+
+       /*
+        * Did we fit?
+        */
+       return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
+}
+
+/*
+ * Like _bus_dmamap_load(), but for mbufs.
+ */
+int
+bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
+                    struct mbuf *m0,
+                    bus_dmamap_callback2_t *callback, void *callback_arg,
+                    int flags)
+{
+       int nsegs, error;
+
+       KASSERT(dmat->lowaddr >= ptoa(Maxmem) || map != NULL,
+               ("bus_dmamap_load_mbuf: No support for bounce pages!"));
+       KASSERT(m0->m_flags & M_PKTHDR,
+               ("bus_dmamap_load_mbuf: no packet header"));
+
+       nsegs = 0;
+       error = 0;
+       if (m0->m_pkthdr.len <= dmat->maxsize) {
+               int first = 1;
+               vm_offset_t lastaddr = 0;
+               struct mbuf *m;
+
+               for (m = m0; m != NULL && error == 0; m = m->m_next) {
+                       if ( m->m_len == 0 )
+                               continue;
+                       error = _bus_dmamap_load_buffer(dmat,
+                                       m->m_data, m->m_len,
+                                       curthread, flags, &lastaddr,
+                                       &nsegs, first);
+                       first = 0;
+               }
+       } else {
+               error = EINVAL;
+       }
+
+       if (error) {
+               /* force "no valid mappings" in callback */
+               (*callback)(callback_arg, dmat->segments, 0, 0, error);
+       } else {
+               (*callback)(callback_arg, dmat->segments,
+                           nsegs+1, m0->m_pkthdr.len, error);
+       }
+       return (error);
+}
+
+/*
+ * Like _bus_dmamap_load(), but for uios.
+ */
+int
+bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
+                   struct uio *uio,
+                   bus_dmamap_callback2_t *callback, void *callback_arg,
+                   int flags)
+{
+       vm_offset_t lastaddr;
+       int nsegs, error, first, i;
+       bus_size_t resid;
+       struct iovec *iov;
+       struct thread *td = NULL;
+
+       KASSERT(dmat->lowaddr >= ptoa(Maxmem) || map != NULL,
+               ("bus_dmamap_load_uio: No support for bounce pages!"));
+
+       resid = uio->uio_resid;
+       iov = uio->uio_iov;
+
+       if (uio->uio_segflg == UIO_USERSPACE) {
+               td = uio->uio_td;
+               KASSERT(td != NULL && td->td_proc != NULL,
+                       ("bus_dmamap_load_uio: USERSPACE but no proc"));
+       }
+
+       nsegs = 0;
+       error = 0;
+       first = 1;
+       for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
+               /*
+                * Now at the first iovec to load.  Load each iovec
+                * until we have exhausted the residual count.
+                */
+               bus_size_t minlen =
+                       resid < iov[i].iov_len ? resid : iov[i].iov_len;
+               caddr_t addr = (caddr_t) iov[i].iov_base;
+
+               error = _bus_dmamap_load_buffer(dmat,
+                               addr, minlen,
+                               td, flags, &lastaddr, &nsegs, first);
+               first = 0;
+
+               resid -= minlen;
+       }
+
+       if (error) {
+               /* force "no valid mappings" in callback */
+               (*callback)(callback_arg, dmat->segments, 0, 0, error);
+       } else {
+               (*callback)(callback_arg, dmat->segments,
+                           nsegs+1, uio->uio_resid, error);
+       }
+       return (error);
+}
+
+/*
+ * Release the mapping held by map.
+ */
+void
+_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
+{
+       struct bounce_page *bpage;
+
+       while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
+               STAILQ_REMOVE_HEAD(&map->bpages, links);
+               free_bounce_page(dmat, bpage);
+       }
+}
+
+void
+_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
+{
+       struct bounce_page *bpage;
+
+       if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
+               
+               /*
+                * Handle data bouncing.  We might also
+                * want to add support for invalidating
+                * the caches on broken hardware
+                */
+               switch (op) {
+               case BUS_DMASYNC_PREWRITE:
+                       while (bpage != NULL) {
+                               bcopy((void *)bpage->datavaddr,
+                                     (void *)bpage->vaddr,
+                                     bpage->datacount);
+                               bpage = STAILQ_NEXT(bpage, links);
+                       }
+                       break;
+
+               case BUS_DMASYNC_POSTREAD:
+                       while (bpage != NULL) {
+                               bcopy((void *)bpage->vaddr,
+                                     (void *)bpage->datavaddr,
+                                     bpage->datacount);
+                               bpage = STAILQ_NEXT(bpage, links);
+                       }
+                       break;
+               case BUS_DMASYNC_PREREAD:
+               case BUS_DMASYNC_POSTWRITE:
+                       /* No-ops */
+                       break;
+               }
+       }
+}
+
+static int
+alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
+{
+       int count;
+
+       count = 0;
+       if (total_bpages == 0) {
+               STAILQ_INIT(&bounce_page_list);
+               STAILQ_INIT(&bounce_map_waitinglist);
+               STAILQ_INIT(&bounce_map_callbacklist);
+       }
+       
+       while (numpages > 0) {
+               struct bounce_page *bpage;
+
+               bpage = (struct bounce_page *)kmalloc(sizeof(*bpage), M_DEVBUF,
+                                                    M_INTWAIT);
+
+               if (bpage == NULL)
+                       break;
+               bzero(bpage, sizeof(*bpage));
+               bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
+                                                        M_NOWAIT, 0ul,
+                                                        dmat->lowaddr,
+                                                        PAGE_SIZE,
+                                                        0);
+               if (bpage->vaddr == NULL) {
+                       kfree(bpage, M_DEVBUF);
+                       break;
+               }
+               bpage->busaddr = pmap_kextract(bpage->vaddr);
+               crit_enter();
+               STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links);
+               total_bpages++;
+               free_bpages++;
+               crit_exit();
+               count++;
+               numpages--;
+       }
+       return (count);
+}
+
+static int
+reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
+{
+       int pages;
+
+       pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
+       free_bpages -= pages;
+       reserved_bpages += pages;
+       map->pagesreserved += pages;
+       pages = map->pagesneeded - map->pagesreserved;
+
+       return (pages);
+}
+
+static bus_addr_t
+add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
+               bus_size_t size)
+{
+       struct bounce_page *bpage;
+
+       if (map->pagesneeded == 0)
+               panic("add_bounce_page: map doesn't need any pages");
+       map->pagesneeded--;
+
+       if (map->pagesreserved == 0)
+               panic("add_bounce_page: map doesn't need any pages");
+       map->pagesreserved--;
+
+       crit_enter();
+       bpage = STAILQ_FIRST(&bounce_page_list);
+       if (bpage == NULL)
+               panic("add_bounce_page: free page list is empty");
+
+       STAILQ_REMOVE_HEAD(&bounce_page_list, links);
+       reserved_bpages--;
+       active_bpages++;
+       crit_exit();
+
+       bpage->datavaddr = vaddr;
+       bpage->datacount = size;
+       STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
+       return (bpage->busaddr);
+}
+
+static void
+free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
+{
+       struct bus_dmamap *map;
+
+       bpage->datavaddr = 0;
+       bpage->datacount = 0;
+
+       crit_enter();
+       STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links);
+       free_bpages++;
+       active_bpages--;
+       if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
+               if (reserve_bounce_pages(map->dmat, map) == 0) {
+                       panic("free_bounce_pages: uncoded\n");
+#if 0
+                       STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
+                       STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
+                                          map, links);
+                       busdma_swi_pending = 1;
+                       setsoftvm();
+#endif
+               }
+       }
+       crit_exit();
+}
+
+#if 0
+
+void
+busdma_swi(void)
+{
+       struct bus_dmamap *map;
+
+       crit_enter();
+       while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
+               STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
+               crit_exit();
+               bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
+                               map->callback, map->callback_arg, /*flags*/0);
+               crit_enter();
+       }
+       crit_exit();
+}
+
+#endif
+
similarity index 78%
copy from sys/cpu/amd64/include/vframe.h
copy to sys/platform/pc64/amd64/console.c
index 0bf2ad6..01856f8 100644 (file)
  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  * 
- * $DragonFly: src/sys/cpu/amd64/include/vframe.h,v 1.1 2007/08/21 19:40:24 corecode Exp $
+ * $DragonFly: src/sys/platform/pc64/amd64/console.c,v 1.1 2007/09/23 04:29:31 yanyh Exp $
  */
 
-#ifndef _CPU_VFRAME_H_
-#define _CPU_VFRAME_H_
-
-#ifndef _MACHINE_NPX_H_
-#include <machine/npx.h>
-#endif
-#ifndef _MACHINE_SEGMENTS_H_
-#include <machine/segments.h>
-#endif
+#include <sys/systm.h>
 
 /*
- * Virtualized external frame.  This is used by the virtual kernel in
- * addition to trapframe.
+ * Global console locking functions
  */
-struct vextframe {
-       /* XXX come back for fixing this in segments.h */
-       /* struct savetls vx_tls; */
-};
-
-#endif
+void
+cons_lock(void)
+{
+}
 
+void
+cons_unlock(void)
+{
+}
diff --git a/sys/platform/pc64/amd64/cpu_regs.c b/sys/platform/pc64/amd64/cpu_regs.c
new file mode 100644 (file)
index 0000000..329f776
--- /dev/null
@@ -0,0 +1,1252 @@
+/*-
+ * Copyright (c) 1992 Terrence R. Lambert.
+ * Copyright (C) 1994, David Greenman
+ * Copyright (c) 1982, 1987, 1990, 1993
+ *     The Regents of the University of California.  All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *     This product includes software developed by the University of
+ *     California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *     from: @(#)machdep.c     7.4 (Berkeley) 6/3/91
+ * $FreeBSD: src/sys/i386/i386/machdep.c,v 1.385.2.30 2003/05/31 08:48:05 alc Exp $
+ * $DragonFly: src/sys/platform/pc64/amd64/Attic/cpu_regs.c,v 1.1 2007/09/23 04:29:31 yanyh Exp $
+ */
+
+#include "use_ether.h"
+#include "use_npx.h"
+#include "use_isa.h"
+#include "opt_atalk.h"
+#include "opt_compat.h"
+#include "opt_ddb.h"
+#include "opt_directio.h"
+#include "opt_inet.h"
+#include "opt_ipx.h"
+#include "opt_msgbuf.h"
+#include "opt_swap.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/sysproto.h>
+#include <sys/signalvar.h>
+#include <sys/kernel.h>
+#include <sys/linker.h>
+#include <sys/malloc.h>
+#include <sys/proc.h>
+#include <sys/buf.h>
+#include <sys/reboot.h>
+#include <sys/mbuf.h>
+#include <sys/msgbuf.h>
+#include <sys/sysent.h>
+#include <sys/sysctl.h>
+#include <sys/vmmeter.h>
+#include <sys/bus.h>
+#include <sys/upcall.h>
+#include <sys/usched.h>
+#include <sys/reg.h>
+
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <sys/lock.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
+#include <vm/vm_map.h>
+#include <vm/vm_pager.h>
+#include <vm/vm_extern.h>
+
+#include <sys/thread2.h>
+
+#include <sys/user.h>
+#include <sys/exec.h>
+#include <sys/cons.h>
+
+#include <ddb/ddb.h>
+
+#include <machine/cpu.h>
+#include <machine/clock.h>
+#include <machine/specialreg.h>
+#include <machine/md_var.h>
+#include <machine/pcb_ext.h>           /* pcb.h included via sys/user.h */
+#include <machine/globaldata.h>                /* CPU_prvspace */
+#include <machine/smp.h>
+#ifdef PERFMON
+#include <machine/perfmon.h>
+#endif
+#include <machine/cputypes.h>
+
+#include <bus/isa/rtc.h>
+/* #include <machine/vm86.h> */
+#include <sys/random.h>
+#include <sys/ptrace.h>
+#include <machine/sigframe.h>
+#include <unistd.h>            /* umtx_* functions */
+
+extern void dblfault_handler (void);
+
+#ifndef CPU_DISABLE_SSE
+static void set_fpregs_xmm (struct save87 *, struct savexmm *);
+static void fill_fpregs_xmm (struct savexmm *, struct save87 *);
+#endif /* CPU_DISABLE_SSE */
+#ifdef DIRECTIO
+extern void ffs_rawread_setup(void);
+#endif /* DIRECTIO */
+
+#ifdef SMP
+int64_t tsc_offsets[MAXCPU];
+#else
+int64_t tsc_offsets[1];
+#endif
+
+#if defined(SWTCH_OPTIM_STATS)
+extern int swtch_optim_stats;
+SYSCTL_INT(_debug, OID_AUTO, swtch_optim_stats,
+       CTLFLAG_RD, &swtch_optim_stats, 0, "");
+SYSCTL_INT(_debug, OID_AUTO, tlb_flush_count,
+       CTLFLAG_RD, &tlb_flush_count, 0, "");
+#endif
+
+static int
+sysctl_hw_physmem(SYSCTL_HANDLER_ARGS)
+{
+       int error = sysctl_handle_int(oidp, 0, ctob((int)Maxmem), req);
+       return (error);
+}
+
+SYSCTL_PROC(_hw, HW_PHYSMEM, physmem, CTLTYPE_INT|CTLFLAG_RD,
+       0, 0, sysctl_hw_physmem, "IU", "");
+
+static int
+sysctl_hw_usermem(SYSCTL_HANDLER_ARGS)
+{
+       int error = sysctl_handle_int(oidp, 0,
+               ctob((int)Maxmem - vmstats.v_wire_count), req);
+       return (error);
+}
+
+SYSCTL_PROC(_hw, HW_USERMEM, usermem, CTLTYPE_INT|CTLFLAG_RD,
+       0, 0, sysctl_hw_usermem, "IU", "");
+
+SYSCTL_ULONG(_hw, OID_AUTO, availpages, CTLFLAG_RD, &Maxmem, NULL, "");
+
+#if 0
+
+static int
+sysctl_machdep_msgbuf(SYSCTL_HANDLER_ARGS)
+{
+       int error;
+
+       /* Unwind the buffer, so that it's linear (possibly starting with
+        * some initial nulls).
+        */
+       error=sysctl_handle_opaque(oidp,msgbufp->msg_ptr+msgbufp->msg_bufr,
+               msgbufp->msg_size-msgbufp->msg_bufr,req);
+       if(error) return(error);
+       if(msgbufp->msg_bufr>0) {
+               error=sysctl_handle_opaque(oidp,msgbufp->msg_ptr,
+                       msgbufp->msg_bufr,req);
+       }
+       return(error);
+}
+
+SYSCTL_PROC(_machdep, OID_AUTO, msgbuf, CTLTYPE_STRING|CTLFLAG_RD,
+       0, 0, sysctl_machdep_msgbuf, "A","Contents of kernel message buffer");
+
+static int msgbuf_clear;
+
+static int
+sysctl_machdep_msgbuf_clear(SYSCTL_HANDLER_ARGS)
+{
+       int error;
+       error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2,
+               req);
+       if (!error && req->newptr) {
+               /* Clear the buffer and reset write pointer */
+               bzero(msgbufp->msg_ptr,msgbufp->msg_size);
+               msgbufp->msg_bufr=msgbufp->msg_bufx=0;
+               msgbuf_clear=0;
+       }
+       return (error);
+}
+
+SYSCTL_PROC(_machdep, OID_AUTO, msgbuf_clear, CTLTYPE_INT|CTLFLAG_RW,
+       &msgbuf_clear, 0, sysctl_machdep_msgbuf_clear, "I",
+       "Clear kernel message buffer");
+
+#endif
+
+/*
+ * Send an interrupt to process.
+ *
+ * Stack is set up to allow sigcode stored
+ * at top to call routine, followed by kcall
+ * to sigreturn routine below.  After sigreturn
+ * resets the signal mask, the stack, and the
+ * frame pointer, it returns to the user
+ * specified pc, psl.
+ */
+
+extern int _ucodesel, _udatasel;
+void
+sendsig(sig_t catcher, int sig, sigset_t *mask, u_long code)
+{
+       struct lwp *lp = curthread->td_lwp;
+       struct proc *p = lp->lwp_proc;
+       struct trapframe *regs;
+       struct sigacts *psp = p->p_sigacts;
+       struct sigframe sf, *sfp;
+       int oonstack;
+
+       regs = lp->lwp_md.md_regs;
+       oonstack = (lp->lwp_sigstk.ss_flags & SS_ONSTACK) ? 1 : 0;
+
+       /* save user context */
+       bzero(&sf, sizeof(struct sigframe));
+       sf.sf_uc.uc_sigmask = *mask;
+       sf.sf_uc.uc_stack = lp->lwp_sigstk;
+       sf.sf_uc.uc_mcontext.mc_onstack = oonstack;
+       /* bcopy(regs, &sf.sf_uc.uc_mcontext.mc_gs, sizeof(struct trapframe)); */
+
+       /* make the size of the saved context visible to userland */
+       sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); 
+
+       /* save mailbox pending state for syscall interlock semantics */
+       if (p->p_flag & P_MAILBOX)
+               sf.sf_uc.uc_mcontext.mc_flags |= PGEX_MAILBOX;
+
+
+       /* Allocate and validate space for the signal handler context. */
+        if ((lp->lwp_flag & LWP_ALTSTACK) != 0 && !oonstack &&
+           SIGISMEMBER(psp->ps_sigonstack, sig)) {
+               sfp = (struct sigframe *)(lp->lwp_sigstk.ss_sp +
+                   lp->lwp_sigstk.ss_size - sizeof(struct sigframe));
+               lp->lwp_sigstk.ss_flags |= SS_ONSTACK;
+       }
+       else
+               sfp = (struct sigframe *)regs->tf_rsp - 1;
+
+       /* Translate the signal if appropriate */
+       if (p->p_sysent->sv_sigtbl) {
+               if (sig <= p->p_sysent->sv_sigsize)
+                       sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];
+       }
+
+       /* Build the argument list for the signal handler. */
+       sf.sf_signum = sig;
+       sf.sf_ucontext = (register_t)&sfp->sf_uc;
+       if (SIGISMEMBER(psp->ps_siginfo, sig)) {
+               /* Signal handler installed with SA_SIGINFO. */
+               sf.sf_siginfo = (register_t)&sfp->sf_si;
+               sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;
+
+               /* fill siginfo structure */
+               sf.sf_si.si_signo = sig;
+               sf.sf_si.si_code = code;
+               sf.sf_si.si_addr = (void*)regs->tf_err;
+       }
+       else {
+               /* Old FreeBSD-style arguments. */
+               sf.sf_siginfo = code;
+               sf.sf_addr = regs->tf_err;
+               sf.sf_ahu.sf_handler = catcher;
+       }
+
+#if 0
+       /*
+        * If we're a vm86 process, we want to save the segment registers.
+        * We also change eflags to be our emulated eflags, not the actual
+        * eflags.
+        */
+       if (regs->tf_rflags & PSL_VM) {
+               struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
+               struct vm86_kernel *vm86 = &lp->lwp_thread->td_pcb->pcb_ext->ext_vm86;
+
+               sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
+               sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
+               sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
+               sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;
+
+               if (vm86->vm86_has_vme == 0)
+                       sf.sf_uc.uc_mcontext.mc_eflags =
+                           (tf->tf_rflags & ~(PSL_VIF | PSL_VIP)) |
+                           (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));
+
+               /*
+                * Clear PSL_NT to inhibit T_TSSFLT faults on return from
+                * syscalls made by the signal handler.  This just avoids
+                * wasting time for our lazy fixup of such faults.  PSL_NT
+                * does nothing in vm86 mode, but vm86 programs can set it
+                * almost legitimately in probes for old cpu types.
+                */
+               tf->tf_rflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
+       }
+#endif
+
+       /*
+        * Copy the sigframe out to the user's stack.
+        */
+       if (copyout(&sf, sfp, sizeof(struct sigframe)) != 0) {
+               /*
+                * Something is wrong with the stack pointer; kill the
+                * process.
+                */
+               sigexit(p, SIGILL);
+       }
+
+       regs->tf_rsp = (int)sfp;
+       regs->tf_rip = PS_STRINGS - *(p->p_sysent->sv_szsigcode);
+       regs->tf_rflags &= ~PSL_T;
+       regs->tf_cs = _ucodesel;
+       /* regs->tf_ds = _udatasel;
+       regs->tf_es = _udatasel; */
+       if (regs->tf_trapno == T_PROTFLT) {
+               /* regs->tf_fs = _udatasel;
+               regs->tf_gs = _udatasel; */
+       }
+       regs->tf_ss = _udatasel;
+}
+
+/*
+ * Sanitize the trapframe for a virtual kernel passing control to a custom
+ * VM context.
+ *
+ * Allow userland to set or maintain PSL_RF, the resume flag.  This flag
+ * basically controls whether the return PC should skip the first instruction
+ * (as in an explicit system call) or re-execute it (as in an exception).
+ */
+int
+cpu_sanitize_frame(struct trapframe *frame)
+{
+       frame->tf_cs = _ucodesel;
+#if 0
+       frame->tf_ds = _udatasel;
+       frame->tf_es = _udatasel;
+       frame->tf_fs = _udatasel;
+       frame->tf_gs = _udatasel;
+#endif
+       frame->tf_ss = _udatasel;
+       frame->tf_rflags &= (PSL_RF | PSL_USERCHANGE);
+       frame->tf_rflags |= PSL_RESERVED_DEFAULT | PSL_I;
+       return(0);
+}
+
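+/*
+ * Validate a set of user-supplied TLS segment descriptors before they are
+ * loaded.  Each in-use descriptor must be a present, 32-bit, read/write
+ * data segment at user privilege; anything else is rejected with ENXIO.
+ */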
+int
+cpu_sanitize_tls(struct savetls *tls)
+{
+        struct segment_descriptor *desc;
+        int i;
+
+        for (i = 0; i < NGTLS; ++i) {
+               desc = &tls->tls[i];
+               if (desc->sd_dpl == 0 && desc->sd_type == 0)
+                       continue;
+               if (desc->sd_def32 == 0)
+                       return(ENXIO);
+               if (desc->sd_type != SDT_MEMRWA)
+                       return(ENXIO);
+               if (desc->sd_dpl != SEL_UPL)
+                       return(ENXIO);
+               if (desc->sd_xx != 0 || desc->sd_p != 1)
+                       return(ENXIO);
+        }
+        return(0);
+}
+
+/*
+ * sigreturn(ucontext_t *sigcntxp)
+ *
+ * System call to cleanup state after a signal
+ * has been taken.  Reset signal mask and
+ * stack state from context left by sendsig (above).
+ * Return to previous pc and psl as specified by
+ * context left by sendsig. Check carefully to
+ * make sure that the user has not modified the
+ * state to gain improper privileges.
+ */
+#define        EFL_SECURE(ef, oef)     ((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)
+#define        CS_SECURE(cs)           (ISPL(cs) == SEL_UPL)
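+/*
+ * EFL_SECURE() verifies that only bits within PSL_USERCHANGE differ between
+ * the user-supplied flags and the current trapframe flags.  CS_SECURE()
+ * verifies that the supplied %cs selector requests user privilege.
+ */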
+
+int
+sys_sigreturn(struct sigreturn_args *uap)
+{
+       struct lwp *lp = curthread->td_lwp;
+       struct proc *p = lp->lwp_proc;
+       struct trapframe *regs;
+       ucontext_t ucp;
+       int cs;
+	long rflags;
+       int error;
+
+       error = copyin(uap->sigcntxp, &ucp, sizeof(ucp));
+       if (error)
+               return (error);
+
+       regs = lp->lwp_md.md_regs;
+       rflags = ucp.uc_mcontext.mc_rflags;
+
+#if 0
+       if (eflags & PSL_VM) {
+               struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
+               struct vm86_kernel *vm86;
+
+               /*
+                * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
+                * set up the vm86 area, and we can't enter vm86 mode.
+                */
+               if (lp->lwp_thread->td_pcb->pcb_ext == 0)
+                       return (EINVAL);
+               vm86 = &lp->lwp_thread->td_pcb->pcb_ext->ext_vm86;
+               if (vm86->vm86_inited == 0)
+                       return (EINVAL);
+
+               /* go back to user mode if both flags are set */
+               if ((eflags & PSL_VIP) && (eflags & PSL_VIF))
+                       trapsignal(lp->lwp_proc, SIGBUS, 0);
+
+               if (vm86->vm86_has_vme) {
+                       eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
+                           (eflags & VME_USERCHANGE) | PSL_VM;
+               } else {
+                       vm86->vm86_eflags = eflags;     /* save VIF, VIP */
+			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
+			    (eflags & VM_USERCHANGE) | PSL_VM;
+               }
+               bcopy(&ucp.uc_mcontext.mc_gs, tf, sizeof(struct trapframe));
+               tf->tf_eflags = eflags;
+               tf->tf_vm86_ds = tf->tf_ds;
+               tf->tf_vm86_es = tf->tf_es;
+               tf->tf_vm86_fs = tf->tf_fs;
+               tf->tf_vm86_gs = tf->tf_gs;
+               tf->tf_ds = _udatasel;
+               tf->tf_es = _udatasel;
+#if 0
+               tf->tf_fs = _udatasel;
+               tf->tf_gs = _udatasel;
+#endif
+       } else 
+#endif
+       {
+               /*
+                * Don't allow users to change privileged or reserved flags.
+                */
+		/*
+		 * XXX do allow users to change the privileged flag PSL_RF.
+		 * The cpu sets PSL_RF in tf_rflags for faults.  Debuggers
+		 * should sometimes set it there too.  tf_rflags is kept in
+		 * the signal context during signal handling and there is no
+		 * other place to remember it, so the PSL_RF bit may be
+		 * corrupted by the signal handler without us knowing.
+		 * Corruption of the PSL_RF bit at worst causes one more or
+		 * one less debugger trap, so allowing it is fairly harmless.
+		 */
+		if (!EFL_SECURE(rflags & ~PSL_RF, regs->tf_rflags & ~PSL_RF)) {
+			kprintf("sigreturn: rflags = 0x%lx\n", rflags);
+			return(EINVAL);
+		}
+
+		/*
+		 * Don't allow users to load a valid privileged %cs.  Let the
+		 * hardware check for invalid selectors, excess privilege in
+		 * other selectors, invalid %rip's and invalid %rsp's.
+		 */
+               cs = ucp.uc_mcontext.mc_cs;
+               if (!CS_SECURE(cs)) {
+                       kprintf("sigreturn: cs = 0x%x\n", cs);
+                       trapsignal(lp, SIGBUS, T_PROTFLT);
+                       return(EINVAL);
+               }
+               /* bcopy(&ucp.uc_mcontext.mc_gs, regs, sizeof(struct trapframe)); */
+       }
+
+       /*
+        * Merge saved signal mailbox pending flag to maintain interlock
+        * semantics against system calls.
+        */
+       if (ucp.uc_mcontext.mc_flags & PGEX_MAILBOX)
+               p->p_flag |= P_MAILBOX;
+
+       if (ucp.uc_mcontext.mc_onstack & 1)
+               lp->lwp_sigstk.ss_flags |= SS_ONSTACK;
+       else
+               lp->lwp_sigstk.ss_flags &= ~SS_ONSTACK;
+
+       lp->lwp_sigmask = ucp.uc_sigmask;
+       SIG_CANTMASK(lp->lwp_sigmask);
+       return(EJUSTRETURN);
+}
+
+/*
+ * Stack frame on entry to function.  %rax will contain the function vector,
+ * %rcx will contain the function data.  flags, %rdx, %rcx, and %rax will
+ * have already been pushed on the stack.
+ */
+struct upc_frame {
+       register_t      eax;
+       register_t      ecx;
+       register_t      edx;
+       register_t      flags;
+       register_t      oldip;
+};
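+/*
+ * The upc_frame is pushed just below the user stack pointer by sendupcall()
+ * and restored from there by fetchupcall() when the dispatcher returns.
+ */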
+
+void
+sendupcall(struct vmupcall *vu, int morepending)
+{
+       struct lwp *lp = curthread->td_lwp;
+       struct trapframe *regs;
+       struct upcall upcall;
+       struct upc_frame upc_frame;
+       int     crit_count = 0;
+
+       /*
+        * If we are a virtual kernel running an emulated user process
+        * context, switch back to the virtual kernel context before
+        * trying to post the signal.
+        */
+       if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
+               lp->lwp_md.md_regs->tf_trapno = 0;
+               vkernel_trap(lp, lp->lwp_md.md_regs);
+       }
+
+       /*
+        * Get the upcall data structure
+        */
+       if (copyin(lp->lwp_upcall, &upcall, sizeof(upcall)) ||
+           copyin((char *)upcall.upc_uthread + upcall.upc_critoff, &crit_count, sizeof(int))
+       ) {
+               vu->vu_pending = 0;
+               kprintf("bad upcall address\n");
+               return;
+       }
+
+       /*
+        * If the data structure is already marked pending or has a critical
+        * section count, mark the data structure as pending and return 
+        * without doing an upcall.  vu_pending is left set.
+        */
+       if (upcall.upc_pending || crit_count >= vu->vu_pending) {
+               if (upcall.upc_pending < vu->vu_pending) {
+                       upcall.upc_pending = vu->vu_pending;
+                       copyout(&upcall.upc_pending, &lp->lwp_upcall->upc_pending,
+                               sizeof(upcall.upc_pending));
+               }
+               return;
+       }
+
+       /*
+        * We can run this upcall now, clear vu_pending.
+        *
+        * Bump our critical section count and set or clear the
+        * user pending flag depending on whether more upcalls are
+        * pending.  The user will be responsible for calling 
+        * upc_dispatch(-1) to process remaining upcalls.
+        */
+       vu->vu_pending = 0;
+       upcall.upc_pending = morepending;
+       crit_count += TDPRI_CRIT;
+       copyout(&upcall.upc_pending, &lp->lwp_upcall->upc_pending, 
+               sizeof(upcall.upc_pending));
+       copyout(&crit_count, (char *)upcall.upc_uthread + upcall.upc_critoff,
+               sizeof(int));
+
+       /*
+        * Construct a stack frame and issue the upcall
+        */
+       regs = lp->lwp_md.md_regs;
+       upc_frame.eax = regs->tf_rax;
+       upc_frame.ecx = regs->tf_rcx;
+       upc_frame.edx = regs->tf_rdx;
+       upc_frame.flags = regs->tf_rflags;
+       upc_frame.oldip = regs->tf_rip;
+       if (copyout(&upc_frame, (void *)(regs->tf_rsp - sizeof(upc_frame)),
+           sizeof(upc_frame)) != 0) {
+               kprintf("bad stack on upcall\n");
+       } else {
+               regs->tf_rax = (register_t)vu->vu_func;
+               regs->tf_rcx = (register_t)vu->vu_data;
+               regs->tf_rdx = (register_t)lp->lwp_upcall;
+               regs->tf_rip = (register_t)vu->vu_ctx;
+               regs->tf_rsp -= sizeof(upc_frame);
+       }
+}
+
+/*
+ * fetchupcall occurs in the context of a system call, which means that
+ * we have to return EJUSTRETURN in order to prevent rax and rdx from
+ * being overwritten by the syscall return value.
+ *
+ * if vu is not NULL we return the new context in %rdx, the new data in %rcx,
+ * and the function pointer in %rax.
+ */
+int
+fetchupcall (struct vmupcall *vu, int morepending, void *rsp)
+{
+       struct upc_frame upc_frame;
+       struct lwp *lp = curthread->td_lwp;
+       struct trapframe *regs;
+       int error;
+       struct upcall upcall;
+       int crit_count;
+
+       regs = lp->lwp_md.md_regs;
+
+       error = copyout(&morepending, &lp->lwp_upcall->upc_pending, sizeof(int));
+       if (error == 0) {
+           if (vu) {
+               /*
+                * This jumps us to the next ready context.
+                */
+               vu->vu_pending = 0;
+               error = copyin(lp->lwp_upcall, &upcall, sizeof(upcall));
+               crit_count = 0;
+               if (error == 0)
+                       error = copyin((char *)upcall.upc_uthread + upcall.upc_critoff, &crit_count, sizeof(int));
+               crit_count += TDPRI_CRIT;
+               if (error == 0)
+                       error = copyout(&crit_count, (char *)upcall.upc_uthread + upcall.upc_critoff, sizeof(int));
+               regs->tf_rax = (register_t)vu->vu_func;
+               regs->tf_rcx = (register_t)vu->vu_data;
+               regs->tf_rdx = (register_t)lp->lwp_upcall;
+               regs->tf_rip = (register_t)vu->vu_ctx;
+               regs->tf_rsp = (register_t)rsp;
+           } else {
+               /*
+                * This returns us to the originally interrupted code.
+                */
+               error = copyin(rsp, &upc_frame, sizeof(upc_frame));
+               regs->tf_rax = upc_frame.eax;
+               regs->tf_rcx = upc_frame.ecx;
+               regs->tf_rdx = upc_frame.edx;
+               regs->tf_rflags = (regs->tf_rflags & ~PSL_USERCHANGE) |
+                               (upc_frame.flags & PSL_USERCHANGE);
+               regs->tf_rip = upc_frame.oldip;
+               regs->tf_rsp = (register_t)((char *)rsp + sizeof(upc_frame));
+           }
+       }
+       if (error == 0)
+               error = EJUSTRETURN;
+       return(error);
+}
+
+/*
+ * cpu_idle() represents the idle LWKT.  You cannot return from this function
+ * (unless you want to blow things up!).  Instead we look for runnable threads
+ * and loop or halt as appropriate.  Giant is not held on entry to the thread.
+ *
+ * The main loop is entered with a critical section held, we must release
+ * the critical section before doing anything else.  lwkt_switch() will
+ * check for pending interrupts due to entering and exiting its own 
+ * critical section.
+ *
+ * Note on cpu_idle_hlt:  On an SMP system we rely on a scheduler IPI
+ * to wake a HLTed cpu up.  However, there are cases where the idlethread
+ * will be entered with the possibility that no IPI will occur and in such
+ * cases lwkt_switch() sets TDF_IDLE_NOHLT.
+ */
+static int     cpu_idle_hlt = 1;
+static int     cpu_idle_hltcnt;
+static int     cpu_idle_spincnt;
+SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW,
+    &cpu_idle_hlt, 0, "Idle loop HLT enable");
+SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hltcnt, CTLFLAG_RW,
+    &cpu_idle_hltcnt, 0, "Idle loop entry halts");
+SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_spincnt, CTLFLAG_RW,
+    &cpu_idle_spincnt, 0, "Idle loop entry spins");
+
+void
+cpu_idle(void)
+{
+       struct thread *td = curthread;
+       struct mdglobaldata *gd = mdcpu;
+
+       crit_exit();
+       KKASSERT(td->td_pri < TDPRI_CRIT);
+       for (;;) {
+               /*
+                * See if there are any LWKTs ready to go.
+                */
+               lwkt_switch();
+
+               /*
+		 * The idle loop halts only if no threads are schedulable
+		 * and no signals have occurred.
+                */
+               if (cpu_idle_hlt && !lwkt_runnable() &&
+                   (td->td_flags & TDF_IDLE_NOHLT) == 0) {
+                       splz();
+                       if (!lwkt_runnable()) {
+#ifdef DEBUGIDLE
+                               struct timeval tv1, tv2;
+                               gettimeofday(&tv1, NULL);
+#endif
+                               /* umtx_sleep(&gd->mi.gd_runqmask, 0, 1000000); */
+#ifdef DEBUGIDLE
+                               gettimeofday(&tv2, NULL);
+                               if (tv2.tv_usec - tv1.tv_usec +
+                                   (tv2.tv_sec - tv1.tv_sec) * 1000000 
+                                   > 500000) {
+                                       kprintf("cpu %d idlelock %08x %08x\n",
+                                               gd->mi.gd_cpuid,
+                                               gd->mi.gd_runqmask,
+                                               gd->gd_fpending);
+                               }
+#endif
+                       }
+#ifdef SMP
+                       else {
+                           __asm __volatile("pause");
+                       }
+#endif
+                       ++cpu_idle_hltcnt;
+               } else {
+                       td->td_flags &= ~TDF_IDLE_NOHLT;
+                       splz();
+#ifdef SMP
+                       /*__asm __volatile("sti; pause");*/
+                       __asm __volatile("pause");
+#else
+                       /*__asm __volatile("sti");*/
+#endif
+                       ++cpu_idle_spincnt;
+               }
+       }
+}
+
+#ifdef SMP
+
+/*
+ * Called by the LWKT switch core with a critical section held if the only
+ * schedulable thread needs the MP lock and we couldn't get it.  On
+ * a real cpu we just spin in the scheduler.  In the virtual kernel
+ * we sleep for a bit.
+ */
+void
+cpu_mplock_contested(void)
+{
+       usleep(1000);
+}
+
+/*
+ * Called by the spinlock code with or without a critical section held
+ * when a spinlock is found to be seriously contested.
+ */
+void
+cpu_spinlock_contested(void)
+{
+       usleep(1000);
+}
+
+#endif
+
+/*
+ * Clear registers on exec
+ */
+void
+exec_setregs(u_long entry, u_long stack, u_long ps_strings)
+{
+       struct thread *td = curthread;
+       struct lwp *lp = td->td_lwp;
+       struct trapframe *regs = lp->lwp_md.md_regs;
+       struct pcb *pcb = lp->lwp_thread->td_pcb;
+
+       /* was i386_user_cleanup() in NetBSD */
+       user_ldt_free(pcb);
+  
+       bzero((char *)regs, sizeof(struct trapframe));
+       regs->tf_rip = entry;
+       regs->tf_rsp = stack;
+       regs->tf_rflags = PSL_USER | (regs->tf_rflags & PSL_T);
+       regs->tf_ss = 0;
+       /* regs->tf_ds = 0;
+       regs->tf_es = 0;
+       regs->tf_fs = 0;
+       regs->tf_gs = 0; */
+       regs->tf_cs = 0;
+
+       /* PS_STRINGS value for BSD/OS binaries.  It is 0 for non-BSD/OS. */
+       regs->tf_rbx = ps_strings;
+
+        /*
+         * Reset the hardware debug registers if they were in use.
+         * They won't have any meaning for the newly exec'd process.  
+         */
+        if (pcb->pcb_flags & PCB_DBREGS) {
+                pcb->pcb_dr0 = 0;
+                pcb->pcb_dr1 = 0;
+                pcb->pcb_dr2 = 0;
+                pcb->pcb_dr3 = 0;
+                pcb->pcb_dr6 = 0;
+                pcb->pcb_dr7 = 0;
+                if (pcb == td->td_pcb) {
+                       /*
+                        * Clear the debug registers on the running
+                        * CPU, otherwise they will end up affecting
+                        * the next process we switch to.
+                        */
+                       reset_dbregs();
+                }
+                pcb->pcb_flags &= ~PCB_DBREGS;
+        }
+
+       /*
+        * Initialize the math emulator (if any) for the current process.
+        * Actually, just clear the bit that says that the emulator has
+        * been initialized.  Initialization is delayed until the process
+        * traps to the emulator (if it is done at all) mainly because
+        * emulators don't provide an entry point for initialization.
+        */
+       /* pcb->pcb_flags &= ~FP_SOFTFP; */
+
+       /*
+        * note: do not set CR0_TS here.  npxinit() must do it after clearing
+        * gd_npxthread.  Otherwise a preemptive interrupt thread may panic
+        * in npxdna().
+        */
+       crit_enter();
+#if 0
+       load_cr0(rcr0() | CR0_MP);
+#endif
+
+#if NNPX > 0
+       /* Initialize the npx (if any) for the current process. */
+       npxinit(__INITIAL_NPXCW__);
+#endif
+       crit_exit();
+
+       /*
+        * note: linux emulator needs edx to be 0x0 on entry, which is
+        * handled in execve simply by setting the 64 bit syscall
+        * return value to 0.
+        */
+}
+
+void
+cpu_setregs(void)
+{
+#if 0
+       unsigned int cr0;
+
+       cr0 = rcr0();
+       cr0 |= CR0_NE;                  /* Done by npxinit() */
+       cr0 |= CR0_MP | CR0_TS;         /* Done at every execve() too. */
+#ifdef I386_CPU
+       if (cpu_class != CPUCLASS_386)
+#endif
+               cr0 |= CR0_WP | CR0_AM;
+       load_cr0(cr0);
+       load_gs(_udatasel);
+#endif
+}
+
+static int
+sysctl_machdep_adjkerntz(SYSCTL_HANDLER_ARGS)
+{
+       int error;
+       error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2,
+               req);
+       if (!error && req->newptr)
+               resettodr();
+       return (error);
+}
+
+SYSCTL_PROC(_machdep, CPU_ADJKERNTZ, adjkerntz, CTLTYPE_INT|CTLFLAG_RW,
+       &adjkerntz, 0, sysctl_machdep_adjkerntz, "I", "");
+
+extern u_long bootdev;         /* not a cdev_t - encoding is different */
+SYSCTL_ULONG(_machdep, OID_AUTO, guessed_bootdev,
+       CTLFLAG_RD, &bootdev, 0, "Boot device (not in cdev_t format)");
+
+/*
+ * Initialize amd64 and configure to run kernel
+ */
+
+/*
+ * Initialize segments & interrupt table
+ */
+
+extern  struct user *proc0paddr;
+
+#if 0
+
+extern inthand_t
+       IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
+       IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
+       IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
+       IDTVEC(page), IDTVEC(mchk), IDTVEC(fpu), IDTVEC(align),
+       IDTVEC(xmm), IDTVEC(syscall),
+       IDTVEC(rsvd0);
+extern inthand_t
+       IDTVEC(int0x80_syscall);
+
+#endif
+
+#ifdef DEBUG_INTERRUPTS
+extern inthand_t *Xrsvdary[256];
+#endif
+
+int
+ptrace_set_pc(struct lwp *lp, unsigned long addr)
+{
+       lp->lwp_md.md_regs->tf_rip = addr;
+       return (0);
+}
+
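+/*
+ * Arrange for the next user instruction to trap by setting the trace
+ * flag (PSL_T) in the saved rflags.
+ */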
+int
+ptrace_single_step(struct lwp *lp)
+{
+       lp->lwp_md.md_regs->tf_rflags |= PSL_T;
+       return (0);
+}
+
+int
+fill_regs(struct lwp *lp, struct reg *regs)
+{
+       struct trapframe *tp;
+
+       tp = lp->lwp_md.md_regs;
+       /* regs->r_gs = tp->tf_gs;
+       regs->r_fs = tp->tf_fs;
+       regs->r_es = tp->tf_es;
+       regs->r_ds = tp->tf_ds; */
+       regs->r_rdi = tp->tf_rdi;
+       regs->r_rsi = tp->tf_rsi;
+       regs->r_rbp = tp->tf_rbp;
+       regs->r_rbx = tp->tf_rbx;
+       regs->r_rdx = tp->tf_rdx;
+       regs->r_rcx = tp->tf_rcx;
+       regs->r_rax = tp->tf_rax;
+       regs->r_rip = tp->tf_rip;
+       regs->r_cs = tp->tf_cs;
+       regs->r_rflags = tp->tf_rflags;
+       regs->r_rsp = tp->tf_rsp;
+       regs->r_ss = tp->tf_ss;
+       return (0);
+}
+
+int
+set_regs(struct lwp *lp, struct reg *regs)
+{
+       struct trapframe *tp;
+
+       tp = lp->lwp_md.md_regs;
+       if (!EFL_SECURE(regs->r_rflags, tp->tf_rflags) ||
+           !CS_SECURE(regs->r_cs))
+               return (EINVAL);
+       /* tp->tf_gs = regs->r_gs;
+       tp->tf_fs = regs->r_fs;
+       tp->tf_es = regs->r_es;
+       tp->tf_ds = regs->r_ds; */
+       tp->tf_rdi = regs->r_rdi;
+       tp->tf_rsi = regs->r_rsi;
+       tp->tf_rbp = regs->r_rbp;
+       tp->tf_rbx = regs->r_rbx;
+       tp->tf_rdx = regs->r_rdx;
+       tp->tf_rcx = regs->r_rcx;
+       tp->tf_rax = regs->r_rax;
+       tp->tf_rip = regs->r_rip;
+       tp->tf_cs = regs->r_cs;
+       tp->tf_rflags = regs->r_rflags;
+       tp->tf_rsp = regs->r_rsp;
+       tp->tf_ss = regs->r_ss;
+       return (0);
+}
+
+#ifndef CPU_DISABLE_SSE
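+/*
+ * Convert between the fxsave (savexmm) and legacy fnsave (save87) layouts
+ * so that fpreg consumers such as ptrace always see the save87 format,
+ * regardless of whether the cpu saves FP state with fxsr.
+ */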
+static void
+fill_fpregs_xmm(struct savexmm *sv_xmm, struct save87 *sv_87)
+{
+       struct env87 *penv_87 = &sv_87->sv_env;
+       struct envxmm *penv_xmm = &sv_xmm->sv_env;
+       int i;
+
+       /* FPU control/status */
+       penv_87->en_cw = penv_xmm->en_cw;
+       penv_87->en_sw = penv_xmm->en_sw;
+       penv_87->en_tw = penv_xmm->en_tw;
+       penv_87->en_fip = penv_xmm->en_fip;
+       penv_87->en_fcs = penv_xmm->en_fcs;
+       penv_87->en_opcode = penv_xmm->en_opcode;
+       penv_87->en_foo = penv_xmm->en_foo;
+       penv_87->en_fos = penv_xmm->en_fos;
+
+       /* FPU registers */
+       for (i = 0; i < 8; ++i)
+               sv_87->sv_ac[i] = sv_xmm->sv_fp[i].fp_acc;
+
+       sv_87->sv_ex_sw = sv_xmm->sv_ex_sw;
+}
+
+static void
+set_fpregs_xmm(struct save87 *sv_87, struct savexmm *sv_xmm)
+{
+       struct env87 *penv_87 = &sv_87->sv_env;
+       struct envxmm *penv_xmm = &sv_xmm->sv_env;
+       int i;
+
+       /* FPU control/status */
+       penv_xmm->en_cw = penv_87->en_cw;
+       penv_xmm->en_sw = penv_87->en_sw;
+       penv_xmm->en_tw = penv_87->en_tw;
+       penv_xmm->en_fip = penv_87->en_fip;
+       penv_xmm->en_fcs = penv_87->en_fcs;
+       penv_xmm->en_opcode = penv_87->en_opcode;
+       penv_xmm->en_foo = penv_87->en_foo;
+       penv_xmm->en_fos = penv_87->en_fos;
+
+       /* FPU registers */
+       for (i = 0; i < 8; ++i)
+               sv_xmm->sv_fp[i].fp_acc = sv_87->sv_ac[i];
+
+       sv_xmm->sv_ex_sw = sv_87->sv_ex_sw;
+}
+#endif /* CPU_DISABLE_SSE */
+
+int
+fill_fpregs(struct lwp *lp, struct fpreg *fpregs)
+{
+#ifndef CPU_DISABLE_SSE
+       if (cpu_fxsr) {
+               fill_fpregs_xmm(&lp->lwp_thread->td_pcb->pcb_save.sv_xmm,
+                               (struct save87 *)fpregs);
+               return (0);
+       }
+#endif /* CPU_DISABLE_SSE */
+       bcopy(&lp->lwp_thread->td_pcb->pcb_save.sv_87, fpregs, sizeof *fpregs);
+       return (0);
+}
+
+int
+set_fpregs(struct lwp *lp, struct fpreg *fpregs)
+{
+#ifndef CPU_DISABLE_SSE
+       if (cpu_fxsr) {
+               set_fpregs_xmm((struct save87 *)fpregs,
+                              &lp->lwp_thread->td_pcb->pcb_save.sv_xmm);
+               return (0);
+       }
+#endif /* CPU_DISABLE_SSE */
+       bcopy(fpregs, &lp->lwp_thread->td_pcb->pcb_save.sv_87, sizeof *fpregs);
+       return (0);
+}
+
+int
+fill_dbregs(struct lwp *lp, struct dbreg *dbregs)
+{
+        if (lp == NULL) {
+                dbregs->dr[0] = rdr0();
+                dbregs->dr[1] = rdr1();
+                dbregs->dr[2] = rdr2();
+                dbregs->dr[3] = rdr3();
+                dbregs->dr[4] = rdr4();
+                dbregs->dr[5] = rdr5();
+                dbregs->dr[6] = rdr6();
+                dbregs->dr[7] = rdr7();
+        } else {
+               struct pcb *pcb;
+
+                pcb = lp->lwp_thread->td_pcb;
+                dbregs->dr[0] = pcb->pcb_dr0;
+                dbregs->dr[1] = pcb->pcb_dr1;
+                dbregs->dr[2] = pcb->pcb_dr2;
+                dbregs->dr[3] = pcb->pcb_dr3;
+                dbregs->dr[4] = 0;
+                dbregs->dr[5] = 0;
+                dbregs->dr[6] = pcb->pcb_dr6;
+                dbregs->dr[7] = pcb->pcb_dr7;
+        }
+       return (0);
+}
+
+int
+set_dbregs(struct lwp *lp, struct dbreg *dbregs)
+{
+       if (lp == NULL) {
+               load_dr0(dbregs->dr[0]);
+               load_dr1(dbregs->dr[1]);
+               load_dr2(dbregs->dr[2]);
+               load_dr3(dbregs->dr[3]);
+               load_dr4(dbregs->dr[4]);
+               load_dr5(dbregs->dr[5]);
+               load_dr6(dbregs->dr[6]);
+               load_dr7(dbregs->dr[7]);
+       } else {
+               struct pcb *pcb;
+               struct ucred *ucred;
+               int i;
+               uint32_t mask1, mask2;
+
+               /*
+                * Don't let an illegal value for dr7 get set.  Specifically,
+                * check for undefined settings.  Setting these bit patterns
+		 * results in undefined behaviour and can lead to an unexpected
+                * TRCTRAP.
+                */
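+		/* scan each two-bit R/Wn and LENn field for the 10b pattern */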
+               for (i = 0, mask1 = 0x3<<16, mask2 = 0x2<<16; i < 8; 
+                    i++, mask1 <<= 2, mask2 <<= 2)
+                       if ((dbregs->dr[7] & mask1) == mask2)
+                               return (EINVAL);
+               
+               pcb = lp->lwp_thread->td_pcb;
+               ucred = lp->lwp_proc->p_ucred;
+
+               /*
+                * Don't let a process set a breakpoint that is not within the
+                * process's address space.  If a process could do this, it
+                * could halt the system by setting a breakpoint in the kernel
+                * (if ddb was enabled).  Thus, we need to check to make sure
+                * that no breakpoints are being enabled for addresses outside
+		 * the process's address space, unless, perhaps, we were called by
+                * uid 0.
+                *
+                * XXX - what about when the watched area of the user's
+                * address space is written into from within the kernel
+                * ... wouldn't that still cause a breakpoint to be generated
+                * from within kernel mode?
+                */
+
+               if (suser_cred(ucred, 0) != 0) {
+                       if (dbregs->dr[7] & 0x3) {
+                               /* dr0 is enabled */
+                               if (dbregs->dr[0] >= VM_MAX_USER_ADDRESS)
+                                       return (EINVAL);
+                       }
+
+                       if (dbregs->dr[7] & (0x3<<2)) {
+                               /* dr1 is enabled */
+                               if (dbregs->dr[1] >= VM_MAX_USER_ADDRESS)
+                                       return (EINVAL);
+                       }
+
+                       if (dbregs->dr[7] & (0x3<<4)) {
+                               /* dr2 is enabled */
+                               if (dbregs->dr[2] >= VM_MAX_USER_ADDRESS)
+                                       return (EINVAL);
+                       }
+
+                       if (dbregs->dr[7] & (0x3<<6)) {
+                               /* dr3 is enabled */
+                               if (dbregs->dr[3] >= VM_MAX_USER_ADDRESS)
+                                       return (EINVAL);
+                       }
+               }
+
+               pcb->pcb_dr0 = dbregs->dr[0];
+               pcb->pcb_dr1 = dbregs->dr[1];
+               pcb->pcb_dr2 = dbregs->dr[2];
+               pcb->pcb_dr3 = dbregs->dr[3];
+               pcb->pcb_dr6 = dbregs->dr[6];
+               pcb->pcb_dr7 = dbregs->dr[7];
+
+               pcb->pcb_flags |= PCB_DBREGS;
+       }
+
+       return (0);
+}
+
+#if 0
+/*
+ * Return > 0 if a hardware breakpoint has been hit, and the
+ * breakpoint was in user space.  Return 0, otherwise.
+ */
+int
+user_dbreg_trap(void)
+{
+        u_int32_t dr7, dr6; /* debug registers dr6 and dr7 */
+        u_int32_t bp;       /* breakpoint bits extracted from dr6 */
+        int nbp;            /* number of breakpoints that triggered */
+        caddr_t addr[4];    /* breakpoint addresses */
+        int i;
+        
+        dr7 = rdr7();
+        if ((dr7 & 0x000000ff) == 0) {
+                /*
+                 * all GE and LE bits in the dr7 register are zero,
+                 * thus the trap couldn't have been caused by the
+                 * hardware debug registers
+                 */
+                return 0;
+        }
+
+        nbp = 0;
+        dr6 = rdr6();
+        bp = dr6 & 0x0000000f;
+
+        if (!bp) {
+                /*
+                 * None of the breakpoint bits are set, meaning this
+                 * trap was not caused by any of the debug registers
+                 */
+                return 0;
+        }
+
+        /*
+         * at least one of the breakpoints was hit, check to see
+         * which ones and if any of them are user space addresses
+         */
+
+        if (bp & 0x01) {
+                addr[nbp++] = (caddr_t)rdr0();
+        }
+        if (bp & 0x02) {
+                addr[nbp++] = (caddr_t)rdr1();
+        }
+        if (bp & 0x04) {
+                addr[nbp++] = (caddr_t)rdr2();
+        }
+        if (bp & 0x08) {
+                addr[nbp++] = (caddr_t)rdr3();
+        }
+
+        for (i=0; i<nbp; i++) {
+                if (addr[i] <
+                    (caddr_t)VM_MAX_USER_ADDRESS) {
+                        /*
+                         * addr[i] is in user space
+                         */
+                        return nbp;
+                }
+        }
+
+        /*
+         * None of the breakpoints are in user space.
+         */
+        return 0;
+}
+
+#endif
+
+#ifndef DDB
+void
+Debugger(const char *msg)
+{
+       kprintf("Debugger(\"%s\") called.\n", msg);
+}
+#endif /* no DDB */
+
diff --git a/sys/platform/pc64/amd64/db_disasm.c b/sys/platform/pc64/amd64/db_disasm.c
new file mode 100644 (file)
index 0000000..f3749dc
--- /dev/null
@@ -0,0 +1,1427 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS
+ * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
+ *  School of Computer Science
+ *  Carnegie Mellon University
+ *  Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ *
+ * $FreeBSD: src/sys/i386/i386/db_disasm.c,v 1.23.2.1 2001/07/29 22:48:37 kris Exp $
+ * $DragonFly: src/sys/platform/pc64/amd64/db_disasm.c,v 1.1 2007/09/23 04:29:31 yanyh Exp $
+ */
+
+/*
+ * Instruction disassembler.
+ */
+#include <sys/param.h>
+
+#include <ddb/ddb.h>
+#include <ddb/db_access.h>
+#include <ddb/db_sym.h>
+
+/*
+ * Size attributes
+ */
+#define        BYTE    0
+#define        WORD    1
+#define        LONG    2
+#define        QUAD    3
+#define        SNGL    4
+#define        DBLR    5
+#define        EXTR    6
+#define        SDEP    7
+#define        NONE    8
+
+/*
+ * Addressing modes
+ */
+#define        E       1                       /* general effective address */
+#define        Eind    2                       /* indirect address (jump, call) */
+#define        Ew      3                       /* address, word size */
+#define        Eb      4                       /* address, byte size */
+#define        R       5                       /* register, in 'reg' field */
+#define        Rw      6                       /* word register, in 'reg' field */
+#define        Ri      7                       /* register in instruction */
+#define        S       8                       /* segment reg, in 'reg' field */
+#define        Si      9                       /* segment reg, in instruction */
+#define        A       10                      /* accumulator */
+#define        BX      11                      /* (bx) */
+#define        CL      12                      /* cl, for shifts */
+#define        DX      13                      /* dx, for IO */
+#define        SI      14                      /* si */
+#define        DI      15                      /* di */
+#define        CR      16                      /* control register */
+#define        DR      17                      /* debug register */
+#define        TR      18                      /* test register */
+#define        I       19                      /* immediate, unsigned */
+#define        Is      20                      /* immediate, signed */
+#define        Ib      21                      /* byte immediate, unsigned */
+#define        Ibs     22                      /* byte immediate, signed */
+#define        Iw      23                      /* word immediate, unsigned */
+#define        O       25                      /* direct address */
+#define        Db      26                      /* byte displacement from EIP */
+#define        Dl      27                      /* long displacement from EIP */
+#define        o1      28                      /* constant 1 */
+#define        o3      29                      /* constant 3 */
+#define        OS      30                      /* immediate offset/segment */
+#define        ST      31                      /* FP stack top */
+#define        STI     32                      /* FP stack */
+#define        X       33                      /* extended FP op */
+#define        XA      34                      /* for 'fstcw %ax' */
+#define        El      35                      /* address, long size */
+#define        Ril     36                      /* long register in instruction */
+#define        Iba     37                      /* byte immediate, don't print if 0xa */
+
+struct inst {
+       const char *    i_name;         /* name */
+       short   i_has_modrm;            /* has regmodrm byte */
+       short   i_size;                 /* operand size */
+       int     i_mode;                 /* addressing modes */
+       const void *    i_extra;        /* pointer to extra opcode table */
+};
+
+#define        op1(x)          (x)
+#define        op2(x,y)        ((x)|((y)<<8))
+#define        op3(x,y,z)      ((x)|((y)<<8)|((z)<<16))
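+/* pack up to three addressing-mode codes into one int, 8 bits per operand */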
+
+struct finst {
+       const char *    f_name;         /* name for memory instruction */
+       int     f_size;                 /* size for memory instruction */
+       int     f_rrmode;               /* mode for rr instruction */
+       const void *    f_rrname;       /* name for rr instruction
+                                          (or pointer to table) */
+};
+
+static const char * const db_Grp6[] = {
+       "sldt",
+       "str",
+       "lldt",
+       "ltr",
+       "verr",
+       "verw",
+       "",
+       ""
+};
+
+static const char * const db_Grp7[] = {
+       "sgdt",
+       "sidt",
+       "lgdt",
+       "lidt",
+       "smsw",
+       "",
+       "lmsw",
+       "invlpg"
+};
+
+static const char * const db_Grp8[] = {
+       "",
+       "",
+       "",
+       "",
+       "bt",
+       "bts",
+       "btr",
+       "btc"
+};
+
+static const char * const db_Grp9[] = {
+       "",
+       "cmpxchg8b",
+       "",
+       "",
+       "",
+       "",
+       "",
+       ""
+};
+
+static const struct inst db_inst_0f0x[] = {
+/*00*/ { "",      TRUE,  NONE,  op1(Ew),     db_Grp6 },
+/*01*/ { "",      TRUE,  NONE,  op1(Ew),     db_Grp7 },
+/*02*/ { "lar",   TRUE,  LONG,  op2(E,R),    0 },
+/*03*/ { "lsl",   TRUE,  LONG,  op2(E,R),    0 },
+/*04*/ { "",      FALSE, NONE,  0,           0 },
+/*05*/ { "",      FALSE, NONE,  0,           0 },
+/*06*/ { "clts",  FALSE, NONE,  0,           0 },
+/*07*/ { "",      FALSE, NONE,  0,           0 },
+
+/*08*/ { "invd",  FALSE, NONE,  0,           0 },
+/*09*/ { "wbinvd",FALSE, NONE,  0,           0 },
+/*0a*/ { "",      FALSE, NONE,  0,           0 },
+/*0b*/ { "",      FALSE, NONE,  0,           0 },
+/*0c*/ { "",      FALSE, NONE,  0,           0 },
+/*0d*/ { "",      FALSE, NONE,  0,           0 },
+/*0e*/ { "",      FALSE, NONE,  0,           0 },
+/*0f*/ { "",      FALSE, NONE,  0,           0 },
+};
+
+static const struct inst db_inst_0f2x[] = {
+/*20*/ { "mov",   TRUE,  LONG,  op2(CR,El),  0 },
+/*21*/ { "mov",   TRUE,  LONG,  op2(DR,El),  0 },
+/*22*/ { "mov",   TRUE,  LONG,  op2(El,CR),  0 },
+/*23*/ { "mov",   TRUE,  LONG,  op2(El,DR),  0 },
+/*24*/ { "mov",   TRUE,  LONG,  op2(TR,El),  0 },
+/*25*/ { "",      FALSE, NONE,  0,           0 },
+/*26*/ { "mov",   TRUE,  LONG,  op2(El,TR),  0 },
+/*27*/ { "",      FALSE, NONE,  0,           0 },
+
+/*28*/ { "",      FALSE, NONE,  0,           0 },
+/*29*/ { "",      FALSE, NONE,  0,           0 },
+/*2a*/ { "",      FALSE, NONE,  0,           0 },
+/*2b*/ { "",      FALSE, NONE,  0,           0 },
+/*2c*/ { "",      FALSE, NONE,  0,           0 },
+/*2d*/ { "",      FALSE, NONE,  0,           0 },
+/*2e*/ { "",      FALSE, NONE,  0,           0 },
+/*2f*/ { "",      FALSE, NONE,  0,           0 },
+};
+
+static const struct inst db_inst_0f3x[] = {
+/*30*/ { "wrmsr", FALSE, NONE,  0,           0 },
+/*31*/ { "rdtsc", FALSE, NONE,  0,           0 },
+/*32*/ { "rdmsr", FALSE, NONE,  0,           0 },
+/*33*/ { "rdpmc", FALSE, NONE,  0,           0 },
+/*34*/ { "",      FALSE, NONE,  0,           0 },
+/*35*/ { "",      FALSE, NONE,  0,           0 },
+/*36*/ { "",      FALSE, NONE,  0,           0 },
+/*37*/ { "",      FALSE, NONE,  0,           0 },
+
+/*38*/ { "",      FALSE, NONE,  0,           0 },
+/*39*/ { "",      FALSE, NONE,  0,           0 },
+/*3a*/ { "",      FALSE, NONE,  0,           0 },
+/*3b*/ { "",      FALSE, NONE,  0,           0 },
+/*3c*/ { "",      FALSE, NONE,  0,           0 },
+/*3d*/ { "",      FALSE, NONE,  0,           0 },
+/*3e*/ { "",      FALSE, NONE,  0,           0 },
+/*3f*/ { "",      FALSE, NONE,  0,           0 },
+};
+
+static const struct inst db_inst_0f8x[] = {
+/*80*/ { "jo",    FALSE, NONE,  op1(Dl),     0 },
+/*81*/ { "jno",   FALSE, NONE,  op1(Dl),     0 },
+/*82*/ { "jb",    FALSE, NONE,  op1(Dl),     0 },
+/*83*/ { "jnb",   FALSE, NONE,  op1(Dl),     0 },
+/*84*/ { "jz",    FALSE, NONE,  op1(Dl),     0 },
+/*85*/ { "jnz",   FALSE, NONE,  op1(Dl),     0 },
+/*86*/ { "jbe",   FALSE, NONE,  op1(Dl),     0 },
+/*87*/ { "jnbe",  FALSE, NONE,  op1(Dl),     0 },
+
+/*88*/ { "js",    FALSE, NONE,  op1(Dl),     0 },
+/*89*/ { "jns",   FALSE, NONE,  op1(Dl),     0 },
+/*8a*/ { "jp",    FALSE, NONE,  op1(Dl),     0 },
+/*8b*/ { "jnp",   FALSE, NONE,  op1(Dl),     0 },
+/*8c*/ { "jl",    FALSE, NONE,  op1(Dl),     0 },
+/*8d*/ { "jnl",   FALSE, NONE,  op1(Dl),     0 },
+/*8e*/ { "jle",   FALSE, NONE,  op1(Dl),     0 },
+/*8f*/ { "jnle",  FALSE, NONE,  op1(Dl),     0 },
+};
+
+static const struct inst db_inst_0f9x[] = {
+/*90*/ { "seto",  TRUE,  NONE,  op1(Eb),     0 },
+/*91*/ { "setno", TRUE,  NONE,  op1(Eb),     0 },
+/*92*/ { "setb",  TRUE,  NONE,  op1(Eb),     0 },
+/*93*/ { "setnb", TRUE,  NONE,  op1(Eb),     0 },
+/*94*/ { "setz",  TRUE,  NONE,  op1(Eb),     0 },
+/*95*/ { "setnz", TRUE,  NONE,  op1(Eb),     0 },
+/*96*/ { "setbe", TRUE,  NONE,  op1(Eb),     0 },
+/*97*/ { "setnbe",TRUE,  NONE,  op1(Eb),     0 },
+
+/*98*/ { "sets",  TRUE,  NONE,  op1(Eb),     0 },
+/*99*/ { "setns", TRUE,  NONE,  op1(Eb),     0 },
+/*9a*/ { "setp",  TRUE,  NONE,  op1(Eb),     0 },
+/*9b*/ { "setnp", TRUE,  NONE,  op1(Eb),     0 },
+/*9c*/ { "setl",  TRUE,  NONE,  op1(Eb),     0 },
+/*9d*/ { "setnl", TRUE,  NONE,  op1(Eb),     0 },
+/*9e*/ { "setle", TRUE,  NONE,  op1(Eb),     0 },
+/*9f*/ { "setnle",TRUE,  NONE,  op1(Eb),     0 },
+};
+
+static const struct inst db_inst_0fax[] = {
+/*a0*/ { "push",  FALSE, NONE,  op1(Si),     0 },
+/*a1*/ { "pop",   FALSE, NONE,  op1(Si),     0 },
+/*a2*/ { "cpuid", FALSE, NONE,  0,           0 },
+/*a3*/ { "bt",    TRUE,  LONG,  op2(R,E),    0 },
+/*a4*/ { "shld",  TRUE,  LONG,  op3(Ib,R,E), 0 },
+/*a5*/ { "shld",  TRUE,  LONG,  op3(CL,R,E), 0 },
+/*a6*/ { "",      FALSE, NONE,  0,           0 },
+/*a7*/ { "",      FALSE, NONE,  0,           0 },
+
+/*a8*/ { "push",  FALSE, NONE,  op1(Si),     0 },
+/*a9*/ { "pop",   FALSE, NONE,  op1(Si),     0 },
+/*aa*/ { "rsm",   FALSE, NONE,  0,           0 },
+/*ab*/ { "bts",   TRUE,  LONG,  op2(R,E),    0 },
+/*ac*/ { "shrd",  TRUE,  LONG,  op3(Ib,R,E), 0 },
+/*ad*/ { "shrd",  TRUE,  LONG,  op3(CL,R,E), 0 },
+/*ae*/ { "",      FALSE, NONE,  0,           0 },
+/*af*/ { "imul",  TRUE,  LONG,  op2(E,R),    0 },
+};
+
+static const struct inst db_inst_0fbx[] = {
+/*b0*/ { "cmpxchg",TRUE, BYTE,  op2(R, E),   0 },
+/*b1*/ { "cmpxchg",TRUE, LONG,  op2(R, E),   0 },
+/*b2*/ { "lss",   TRUE,  LONG,  op2(E, R),   0 },
+/*b3*/ { "btr",   TRUE,  LONG,  op2(R, E),   0 },
+/*b4*/ { "lfs",   TRUE,  LONG,  op2(E, R),   0 },
+/*b5*/ { "lgs",   TRUE,  LONG,  op2(E, R),   0 },
+/*b6*/ { "movzb", TRUE,  LONG,  op2(Eb, R),  0 },
+/*b7*/ { "movzw", TRUE,  LONG,  op2(Ew, R),  0 },
+
+/*b8*/ { "",      FALSE, NONE,  0,           0 },
+/*b9*/ { "",      FALSE, NONE,  0,           0 },
+/*ba*/ { "",      TRUE,  LONG,  op2(Ib, E),  db_Grp8 },
+/*bb*/ { "btc",   TRUE,  LONG,  op2(R, E),   0 },
+/*bc*/ { "bsf",   TRUE,  LONG,  op2(E, R),   0 },
+/*bd*/ { "bsr",   TRUE,  LONG,  op2(E, R),   0 },
+/*be*/ { "movsb", TRUE,  LONG,  op2(Eb, R),  0 },
+/*bf*/ { "movsw", TRUE,  LONG,  op2(Ew, R),  0 },
+};
+
+static const struct inst db_inst_0fcx[] = {
+/*c0*/ { "xadd",  TRUE,  BYTE,  op2(R, E),   0 },
+/*c1*/ { "xadd",  TRUE,  LONG,  op2(R, E),   0 },
+/*c2*/ { "",      FALSE, NONE,  0,           0 },
+/*c3*/ { "",      FALSE, NONE,  0,           0 },
+/*c4*/ { "",      FALSE, NONE,  0,           0 },
+/*c5*/ { "",      FALSE, NONE,  0,           0 },
+/*c6*/ { "",      FALSE, NONE,  0,           0 },
+/*c7*/ { "",      TRUE,  NONE,  op1(E),      db_Grp9 },
+/*c8*/ { "bswap", FALSE, LONG,  op1(Ril),    0 },
+/*c9*/ { "bswap", FALSE, LONG,  op1(Ril),    0 },
+/*ca*/ { "bswap", FALSE, LONG,  op1(Ril),    0 },
+/*cb*/ { "bswap", FALSE, LONG,  op1(Ril),    0 },
+/*cc*/ { "bswap", FALSE, LONG,  op1(Ril),    0 },
+/*cd*/ { "bswap", FALSE, LONG,  op1(Ril),    0 },
+/*ce*/ { "bswap", FALSE, LONG,  op1(Ril),    0 },
+/*cf*/ { "bswap", FALSE, LONG,  op1(Ril),    0 },
+};
+
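+/* two-byte 0f opcode tables, selected by the high nibble of the second byte */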
+static const struct inst * const db_inst_0f[] = {
+       db_inst_0f0x,
+       0,
+       db_inst_0f2x,
+       db_inst_0f3x,
+       0,
+       0,
+       0,
+       0,
+       db_inst_0f8x,
+       db_inst_0f9x,
+       db_inst_0fax,
+       db_inst_0fbx,
+       db_inst_0fcx,
+       0,
+       0,
+       0
+};
+
+static const char * const db_Esc92[] = {
+       "fnop", "",     "",     "",     "",     "",     "",     ""
+};
+static const char * const db_Esc94[] = {
+       "fchs", "fabs", "",     "",     "ftst", "fxam", "",     ""
+};
+static const char * const db_Esc95[] = {
+       "fld1", "fldl2t","fldl2e","fldpi","fldlg2","fldln2","fldz",""
+};
+static const char * const db_Esc96[] = {
+       "f2xm1","fyl2x","fptan","fpatan","fxtract","fprem1","fdecstp",
+       "fincstp"
+};
+static const char * const db_Esc97[] = {
+       "fprem","fyl2xp1","fsqrt","fsincos","frndint","fscale","fsin","fcos"
+};
+
+static const char * const db_Esca5[] = {
+       "",     "fucompp","",   "",     "",     "",     "",     ""
+};
+
+static const char * const db_Escb4[] = {
+       "fneni","fndisi",       "fnclex","fninit","fsetpm",     "",     "",     ""
+};
+
+static const char * const db_Esce3[] = {
+       "",     "fcompp","",    "",     "",     "",     "",     ""
+};
+
+static const char * const db_Escf4[] = {
+       "fnstsw","",    "",     "",     "",     "",     "",     ""
+};
+
+static const struct finst db_Esc8[] = {
+/*0*/  { "fadd",   SNGL,  op2(STI,ST), 0 },
+/*1*/  { "fmul",   SNGL,  op2(STI,ST), 0 },
+/*2*/  { "fcom",   SNGL,  op2(STI,ST), 0 },
+/*3*/  { "fcomp",  SNGL,  op2(STI,ST), 0 },
+/*4*/  { "fsub",   SNGL,  op2(STI,ST), 0 },
+/*5*/  { "fsubr",  SNGL,  op2(STI,ST), 0 },
+/*6*/  { "fdiv",   SNGL,  op2(STI,ST), 0 },
+/*7*/  { "fdivr",  SNGL,  op2(STI,ST), 0 },
+};
+
+static const struct finst db_Esc9[] = {
+/*0*/  { "fld",    SNGL,  op1(STI),    0 },
+/*1*/  { "",       NONE,  op1(STI),    "fxch" },
+/*2*/  { "fst",    SNGL,  op1(X),      db_Esc92 },
+/*3*/  { "fstp",   SNGL,  0,           0 },
+/*4*/  { "fldenv", NONE,  op1(X),      db_Esc94 },
+/*5*/  { "fldcw",  NONE,  op1(X),      db_Esc95 },
+/*6*/  { "fnstenv",NONE,  op1(X),      db_Esc96 },
+/*7*/  { "fnstcw", NONE,  op1(X),      db_Esc97 },
+};
+
+static const struct finst db_Esca[] = {
+/*0*/  { "fiadd",  LONG,  0,           0 },
+/*1*/  { "fimul",  LONG,  0,           0 },
+/*2*/  { "ficom",  LONG,  0,           0 },
+/*3*/  { "ficomp", LONG,  0,           0 },
+/*4*/  { "fisub",  LONG,  0,           0 },
+/*5*/  { "fisubr", LONG,  op1(X),      db_Esca5 },
+/*6*/  { "fidiv",  LONG,  0,           0 },
+/*7*/  { "fidivr", LONG,  0,           0 }
+};
+
+static const struct finst db_Escb[] = {
+/*0*/  { "fild",   LONG,  0,           0 },
+/*1*/  { "",       NONE,  0,           0 },
+/*2*/  { "fist",   LONG,  0,           0 },
+/*3*/  { "fistp",  LONG,  0,           0 },
+/*4*/  { "",       WORD,  op1(X),      db_Escb4 },
+/*5*/  { "fld",    EXTR,  0,           0 },
+/*6*/  { "",       WORD,  0,           0 },
+/*7*/  { "fstp",   EXTR,  0,           0 },
+};
+
+static const struct finst db_Escc[] = {
+/*0*/  { "fadd",   DBLR,  op2(ST,STI), 0 },
+/*1*/  { "fmul",   DBLR,  op2(ST,STI), 0 },
+/*2*/  { "fcom",   DBLR,  0,           0 },
+/*3*/  { "fcomp",  DBLR,  0,           0 },
+/*4*/  { "fsub",   DBLR,  op2(ST,STI), "fsubr" },
+/*5*/  { "fsubr",  DBLR,  op2(ST,STI), "fsub" },
+/*6*/  { "fdiv",   DBLR,  op2(ST,STI), "fdivr" },
+/*7*/  { "fdivr",  DBLR,  op2(ST,STI), "fdiv" },
+};
+
+static const struct finst db_Escd[] = {
+/*0*/  { "fld",    DBLR,  op1(STI),    "ffree" },
+/*1*/  { "",       NONE,  0,           0 },
+/*2*/  { "fst",    DBLR,  op1(STI),    0 },
+/*3*/  { "fstp",   DBLR,  op1(STI),    0 },
+/*4*/  { "frstor", NONE,  op1(STI),    "fucom" },
+/*5*/  { "",       NONE,  op1(STI),    "fucomp" },
+/*6*/  { "fnsave", NONE,  0,           0 },
+/*7*/  { "fnstsw", NONE,  0,           0 },
+};
+
+static const struct finst db_Esce[] = {
+/*0*/  { "fiadd",  WORD,  op2(ST,STI), "faddp" },
+/*1*/  { "fimul",  WORD,  op2(ST,STI), "fmulp" },
+/*2*/  { "ficom",  WORD,  0,           0 },
+/*3*/  { "ficomp", WORD,  op1(X),      db_Esce3 },
+/*4*/  { "fisub",  WORD,  op2(ST,STI), "fsubrp" },
+/*5*/  { "fisubr", WORD,  op2(ST,STI), "fsubp" },
+/*6*/  { "fidiv",  WORD,  op2(ST,STI), "fdivrp" },
+/*7*/  { "fidivr", WORD,  op2(ST,STI), "fdivp" },
+};
+
+static const struct finst db_Escf[] = {
+/*0*/  { "fild",   WORD,  0,           0 },
+/*1*/  { "",       NONE,  0,           0 },
+/*2*/  { "fist",   WORD,  0,           0 },
+/*3*/  { "fistp",  WORD,  0,           0 },
+/*4*/  { "fbld",   NONE,  op1(XA),     db_Escf4 },
+/*5*/  { "fild",   QUAD,  0,           0 },
+/*6*/  { "fbstp",  NONE,  0,           0 },
+/*7*/  { "fistp",  QUAD,  0,           0 },
+};
+
+static const struct finst * const db_Esc_inst[] = {
+       db_Esc8, db_Esc9, db_Esca, db_Escb,
+       db_Escc, db_Escd, db_Esce, db_Escf
+};
+
+static const char * const db_Grp1[] = {
+       "add",
+       "or",
+       "adc",
+       "sbb",
+       "and",
+       "sub",
+       "xor",
+       "cmp"
+};
+
+static const char * const db_Grp2[] = {
+       "rol",
+       "ror",
+       "rcl",
+       "rcr",
+       "shl",
+       "shr",
+       "shl",
+       "sar"
+};
+
+static const struct inst db_Grp3[] = {
+       { "test",  TRUE, NONE, op2(I,E), 0 },
+       { "test",  TRUE, NONE, op2(I,E), 0 },
+       { "not",   TRUE, NONE, op1(E),   0 },
+       { "neg",   TRUE, NONE, op1(E),   0 },
+       { "mul",   TRUE, NONE, op2(E,A), 0 },
+       { "imul",  TRUE, NONE, op2(E,A), 0 },
+       { "div",   TRUE, NONE, op2(E,A), 0 },
+       { "idiv",  TRUE, NONE, op2(E,A), 0 },
+};
+
+static const struct inst db_Grp4[] = {
+       { "inc",   TRUE, BYTE, op1(E),   0 },
+       { "dec",   TRUE, BYTE, op1(E),   0 },
+       { "",      TRUE, NONE, 0,        0 },
+       { "",      TRUE, NONE, 0,        0 },
+       { "",      TRUE, NONE, 0,        0 },
+       { "",      TRUE, NONE, 0,        0 },
+       { "",      TRUE, NONE, 0,        0 },
+       { "",      TRUE, NONE, 0,        0 }
+};
+
+static const struct inst db_Grp5[] = {
+       { "inc",   TRUE, LONG, op1(E),   0 },
+       { "dec",   TRUE, LONG, op1(E),   0 },
+       { "call",  TRUE, LONG, op1(Eind),0 },
+       { "lcall", TRUE, LONG, op1(Eind),0 },
+       { "jmp",   TRUE, LONG, op1(Eind),0 },
+       { "ljmp",  TRUE, LONG, op1(Eind),0 },
+       { "push",  TRUE, LONG, op1(E),   0 },
+       { "",      TRUE, NONE, 0,        0 }
+};
+
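+/* one-byte opcode table, indexed directly by the first opcode byte */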
+static const struct inst db_inst_table[256] = {
+/*00*/ { "add",   TRUE,  BYTE,  op2(R, E),  0 },
+/*01*/ { "add",   TRUE,  LONG,  op2(R, E),  0 },
+/*02*/ { "add",   TRUE,  BYTE,  op2(E, R),  0 },
+/*03*/ { "add",   TRUE,  LONG,  op2(E, R),  0 },
+/*04*/ { "add",   FALSE, BYTE,  op2(I, A),  0 },
+/*05*/ { "add",   FALSE, LONG,  op2(Is, A), 0 },
+/*06*/ { "push",  FALSE, NONE,  op1(Si),    0 },
+/*07*/ { "pop",   FALSE, NONE,  op1(Si),    0 },
+
+/*08*/ { "or",    TRUE,  BYTE,  op2(R, E),  0 },
+/*09*/ { "or",    TRUE,  LONG,  op2(R, E),  0 },
+/*0a*/ { "or",    TRUE,  BYTE,  op2(E, R),  0 },
+/*0b*/ { "or",    TRUE,  LONG,  op2(E, R),  0 },
+/*0c*/ { "or",    FALSE, BYTE,  op2(I, A),  0 },
+/*0d*/ { "or",    FALSE, LONG,  op2(I, A),  0 },
+/*0e*/ { "push",  FALSE, NONE,  op1(Si),    0 },
+/*0f*/ { "",      FALSE, NONE,  0,          0 },
+
+/*10*/ { "adc",   TRUE,  BYTE,  op2(R, E),  0 },
+/*11*/ { "adc",   TRUE,  LONG,  op2(R, E),  0 },
+/*12*/ { "adc",   TRUE,  BYTE,  op2(E, R),  0 },
+/*13*/ { "adc",   TRUE,  LONG,  op2(E, R),  0 },
+/*14*/ { "adc",   FALSE, BYTE,  op2(I, A),  0 },
+/*15*/ { "adc",   FALSE, LONG,  op2(Is, A), 0 },
+/*16*/ { "push",  FALSE, NONE,  op1(Si),    0 },
+/*17*/ { "pop",   FALSE, NONE,  op1(Si),    0 },
+
+/*18*/ { "sbb",   TRUE,  BYTE,  op2(R, E),  0 },
+/*19*/ { "sbb",   TRUE,  LONG,  op2(R, E),  0 },
+/*1a*/ { "sbb",   TRUE,  BYTE,  op2(E, R),  0 },
+/*1b*/ { "sbb",   TRUE,  LONG,  op2(E, R),  0 },
+/*1c*/ { "sbb",   FALSE, BYTE,  op2(I, A),  0 },
+/*1d*/ { "sbb",   FALSE, LONG,  op2(Is, A), 0 },
+/*1e*/ { "push",  FALSE, NONE,  op1(Si),    0 },
+/*1f*/ { "pop",   FALSE, NONE,  op1(Si),    0 },
+
+/*20*/ { "and",   TRUE,  BYTE,  op2(R, E),  0 },
+/*21*/ { "and",   TRUE,  LONG,  op2(R, E),  0 },
+/*22*/ { "and",   TRUE,  BYTE,  op2(E, R),  0 },
+/*23*/ { "and",   TRUE,  LONG,  op2(E, R),  0 },
+/*24*/ { "and",   FALSE, BYTE,  op2(I, A),  0 },
+/*25*/ { "and",   FALSE, LONG,  op2(I, A),  0 },
+/*26*/ { "",      FALSE, NONE,  0,          0 },
+/*27*/ { "daa",   FALSE, NONE,  0,          0 },
+
+/*28*/ { "sub",   TRUE,  BYTE,  op2(R, E),  0 },
+/*29*/ { "sub",   TRUE,  LONG,  op2(R, E),  0 },
+/*2a*/ { "sub",   TRUE,  BYTE,  op2(E, R),  0 },
+/*2b*/ { "sub",   TRUE,  LONG,  op2(E, R),  0 },
+/*2c*/ { "sub",   FALSE, BYTE,  op2(I, A),  0 },
+/*2d*/ { "sub",   FALSE, LONG,  op2(Is, A), 0 },
+/*2e*/ { "",      FALSE, NONE,  0,          0 },
+/*2f*/ { "das",   FALSE, NONE,  0,          0 },
+
+/*30*/ { "xor",   TRUE,  BYTE,  op2(R, E),  0 },
+/*31*/ { "xor",   TRUE,  LONG,  op2(R, E),  0 },
+/*32*/ { "xor",   TRUE,  BYTE,  op2(E, R),  0 },
+/*33*/ { "xor",   TRUE,  LONG,  op2(E, R),  0 },
+/*34*/ { "xor",   FALSE, BYTE,  op2(I, A),  0 },
+/*35*/ { "xor",   FALSE, LONG,  op2(I, A),  0 },
+/*36*/ { "",      FALSE, NONE,  0,          0 },
+/*37*/ { "aaa",   FALSE, NONE,  0,          0 },
+
+/*38*/ { "cmp",   TRUE,  BYTE,  op2(R, E),  0 },
+/*39*/ { "cmp",   TRUE,  LONG,  op2(R, E),  0 },
+/*3a*/ { "cmp",   TRUE,  BYTE,  op2(E, R),  0 },
+/*3b*/ { "cmp",   TRUE,  LONG,  op2(E, R),  0 },
+/*3c*/ { "cmp",   FALSE, BYTE,  op2(I, A),  0 },
+/*3d*/ { "cmp",   FALSE, LONG,  op2(Is, A), 0 },
+/*3e*/ { "",      FALSE, NONE,  0,          0 },
+/*3f*/ { "aas",   FALSE, NONE,  0,          0 },
+
+/*40*/ { "inc",   FALSE, LONG,  op1(Ri),    0 },
+/*41*/ { "inc",   FALSE, LONG,  op1(Ri),    0 },
+/*42*/ { "inc",   FALSE, LONG,  op1(Ri),    0 },
+/*43*/ { "inc",   FALSE, LONG,  op1(Ri),    0 },
+/*44*/ { "inc",   FALSE, LONG,  op1(Ri),    0 },
+/*45*/ { "inc",   FALSE, LONG,  op1(Ri),    0 },
+/*46*/ { "inc",   FALSE, LONG,  op1(Ri),    0 },
+/*47*/ { "inc",   FALSE, LONG,  op1(Ri),    0 },
+
+/*48*/ { "dec",   FALSE, LONG,  op1(Ri),    0 },
+/*49*/ { "dec",   FALSE, LONG,  op1(Ri),    0 },
+/*4a*/ { "dec",   FALSE, LONG,  op1(Ri),    0 },
+/*4b*/ { "dec",   FALSE, LONG,  op1(Ri),    0 },
+/*4c*/ { "dec",   FALSE, LONG,  op1(Ri),    0 },
+/*4d*/ { "dec",   FALSE, LONG,  op1(Ri),    0 },
+/*4e*/ { "dec",   FALSE, LONG,  op1(Ri),    0 },
+/*4f*/ { "dec",   FALSE, LONG,  op1(Ri),    0 },
+
+/*50*/ { "push",  FALSE, LONG,  op1(Ri),    0 },
+/*51*/ { "push",  FALSE, LONG,  op1(Ri),    0 },
+/*52*/ { "push",  FALSE, LONG,  op1(Ri),    0 },
+/*53*/ { "push",  FALSE, LONG,  op1(Ri),    0 },
+/*54*/ { "push",  FALSE, LONG,  op1(Ri),    0 },
+/*55*/ { "push",  FALSE, LONG,  op1(Ri),    0 },
+/*56*/ { "push",  FALSE, LONG,  op1(Ri),    0 },
+/*57*/ { "push",  FALSE, LONG,  op1(Ri),    0 },
+
+/*58*/ { "pop",   FALSE, LONG,  op1(Ri),    0 },
+/*59*/ { "pop",   FALSE, LONG,  op1(Ri),    0 },
+/*5a*/ { "pop",   FALSE, LONG,  op1(Ri),    0 },
+/*5b*/ { "pop",   FALSE, LONG,  op1(Ri),    0 },
+/*5c*/ { "pop",   FALSE, LONG,  op1(Ri),    0 },
+/*5d*/ { "pop",   FALSE, LONG,  op1(Ri),    0 },
+/*5e*/ { "pop",   FALSE, LONG,  op1(Ri),    0 },
+/*5f*/ { "pop",   FALSE, LONG,  op1(Ri),    0 },
+
+/*60*/ { "pusha", FALSE, LONG,  0,          0 },
+/*61*/ { "popa",  FALSE, LONG,  0,          0 },
+/*62*/  { "bound", TRUE,  LONG,  op2(E, R),  0 },
+/*63*/ { "arpl",  TRUE,  NONE,  op2(Rw,Ew), 0 },
+
+/*64*/ { "",      FALSE, NONE,  0,          0 },
+/*65*/ { "",      FALSE, NONE,  0,          0 },
+/*66*/ { "",      FALSE, NONE,  0,          0 },
+/*67*/ { "",      FALSE, NONE,  0,          0 },
+
+/*68*/ { "push",  FALSE, LONG,  op1(I),     0 },
+/*69*/  { "imul",  TRUE,  LONG,  op3(I,E,R), 0 },
+/*6a*/ { "push",  FALSE, LONG,  op1(Ibs),   0 },
+/*6b*/  { "imul",  TRUE,  LONG,  op3(Ibs,E,R),0 },
+/*6c*/ { "ins",   FALSE, BYTE,  op2(DX, DI), 0 },
+/*6d*/ { "ins",   FALSE, LONG,  op2(DX, DI), 0 },
+/*6e*/ { "outs",  FALSE, BYTE,  op2(SI, DX), 0 },
+/*6f*/ { "outs",  FALSE, LONG,  op2(SI, DX), 0 },
+
+/*70*/ { "jo",    FALSE, NONE,  op1(Db),     0 },
+/*71*/ { "jno",   FALSE, NONE,  op1(Db),     0 },
+/*72*/ { "jb",    FALSE, NONE,  op1(Db),     0 },
+/*73*/ { "jnb",   FALSE, NONE,  op1(Db),     0 },
+/*74*/ { "jz",    FALSE, NONE,  op1(Db),     0 },
+/*75*/ { "jnz",   FALSE, NONE,  op1(Db),     0 },
+/*76*/ { "jbe",   FALSE, NONE,  op1(Db),     0 },
+/*77*/ { "jnbe",  FALSE, NONE,  op1(Db),     0 },
+
+/*78*/ { "js",    FALSE, NONE,  op1(Db),     0 },
+/*79*/ { "jns",   FALSE, NONE,  op1(Db),     0 },
+/*7a*/ { "jp",    FALSE, NONE,  op1(Db),     0 },
+/*7b*/ { "jnp",   FALSE, NONE,  op1(Db),     0 },
+/*7c*/ { "jl",    FALSE, NONE,  op1(Db),     0 },
+/*7d*/ { "jnl",   FALSE, NONE,  op1(Db),     0 },
+/*7e*/ { "jle",   FALSE, NONE,  op1(Db),     0 },
+/*7f*/ { "jnle",  FALSE, NONE,  op1(Db),     0 },
+
+/*80*/  { "",     TRUE,  BYTE,  op2(I, E),   db_Grp1 },
+/*81*/  { "",     TRUE,  LONG,  op2(I, E),   db_Grp1 },
+/*82*/  { "",     TRUE,  BYTE,  op2(I, E),   db_Grp1 },
+/*83*/  { "",     TRUE,  LONG,  op2(Ibs,E),  db_Grp1 },
+/*84*/ { "test",  TRUE,  BYTE,  op2(R, E),   0 },
+/*85*/ { "test",  TRUE,  LONG,  op2(R, E),   0 },
+/*86*/ { "xchg",  TRUE,  BYTE,  op2(R, E),   0 },
+/*87*/ { "xchg",  TRUE,  LONG,  op2(R, E),   0 },
+
+/*88*/ { "mov",   TRUE,  BYTE,  op2(R, E),   0 },
+/*89*/ { "mov",   TRUE,  LONG,  op2(R, E),   0 },
+/*8a*/ { "mov",   TRUE,  BYTE,  op2(E, R),   0 },
+/*8b*/ { "mov",   TRUE,  LONG,  op2(E, R),   0 },
+/*8c*/  { "mov",   TRUE,  NONE,  op2(S, Ew),  0 },
+/*8d*/ { "lea",   TRUE,  LONG,  op2(E, R),   0 },
+/*8e*/ { "mov",   TRUE,  NONE,  op2(Ew, S),  0 },
+/*8f*/ { "pop",   TRUE,  LONG,  op1(E),      0 },
+
+/*90*/ { "nop",   FALSE, NONE,  0,           0 },
+/*91*/ { "xchg",  FALSE, LONG,  op2(A, Ri),  0 },
+/*92*/ { "xchg",  FALSE, LONG,  op2(A, Ri),  0 },
+/*93*/ { "xchg",  FALSE, LONG,  op2(A, Ri),  0 },
+/*94*/ { "xchg",  FALSE, LONG,  op2(A, Ri),  0 },
+/*95*/ { "xchg",  FALSE, LONG,  op2(A, Ri),  0 },
+/*96*/ { "xchg",  FALSE, LONG,  op2(A, Ri),  0 },
+/*97*/ { "xchg",  FALSE, LONG,  op2(A, Ri),  0 },
+
+/*98*/ { "cbw",   FALSE, SDEP,  0,           "cwde" }, /* cbw/cwde */
+/*99*/ { "cwd",   FALSE, SDEP,  0,           "cdq"  }, /* cwd/cdq */
+/*9a*/ { "lcall", FALSE, NONE,  op1(OS),     0 },
+/*9b*/ { "wait",  FALSE, NONE,  0,           0 },
+/*9c*/ { "pushf", FALSE, LONG,  0,           0 },
+/*9d*/ { "popf",  FALSE, LONG,  0,           0 },
+/*9e*/ { "sahf",  FALSE, NONE,  0,           0 },
+/*9f*/ { "lahf",  FALSE, NONE,  0,           0 },
+
+/*a0*/ { "mov",   FALSE, BYTE,  op2(O, A),   0 },
+/*a1*/ { "mov",   FALSE, LONG,  op2(O, A),   0 },
+/*a2*/ { "mov",   FALSE, BYTE,  op2(A, O),   0 },
+/*a3*/ { "mov",   FALSE, LONG,  op2(A, O),   0 },
+/*a4*/ { "movs",  FALSE, BYTE,  op2(SI,DI),  0 },
+/*a5*/ { "movs",  FALSE, LONG,  op2(SI,DI),  0 },
+/*a6*/ { "cmps",  FALSE, BYTE,  op2(SI,DI),  0 },
+/*a7*/ { "cmps",  FALSE, LONG,  op2(SI,DI),  0 },
+
+/*a8*/ { "test",  FALSE, BYTE,  op2(I, A),   0 },
+/*a9*/ { "test",  FALSE, LONG,  op2(I, A),   0 },
+/*aa*/ { "stos",  FALSE, BYTE,  op1(DI),     0 },
+/*ab*/ { "stos",  FALSE, LONG,  op1(DI),     0 },
+/*ac*/ { "lods",  FALSE, BYTE,  op1(SI),     0 },
+/*ad*/ { "lods",  FALSE, LONG,  op1(SI),     0 },
+/*ae*/ { "scas",  FALSE, BYTE,  op1(SI),     0 },
+/*af*/ { "scas",  FALSE, LONG,  op1(SI),     0 },
+
+/*b0*/ { "mov",   FALSE, BYTE,  op2(I, Ri),  0 },
+/*b1*/ { "mov",   FALSE, BYTE,  op2(I, Ri),  0 },
+/*b2*/ { "mov",   FALSE, BYTE,  op2(I, Ri),  0 },
+/*b3*/ { "mov",   FALSE, BYTE,  op2(I, Ri),  0 },
+/*b4*/ { "mov",   FALSE, BYTE,  op2(I, Ri),  0 },
+/*b5*/ { "mov",   FALSE, BYTE,  op2(I, Ri),  0 },
+/*b6*/ { "mov",   FALSE, BYTE,  op2(I, Ri),  0 },
+/*b7*/ { "mov",   FALSE, BYTE,  op2(I, Ri),  0 },
+
+/*b8*/ { "mov",   FALSE, LONG,  op2(I, Ri),  0 },
+/*b9*/ { "mov",   FALSE, LONG,  op2(I, Ri),  0 },
+/*ba*/ { "mov",   FALSE, LONG,  op2(I, Ri),  0 },
+/*bb*/ { "mov",   FALSE, LONG,  op2(I, Ri),  0 },
+/*bc*/ { "mov",   FALSE, LONG,  op2(I, Ri),  0 },
+/*bd*/ { "mov",   FALSE, LONG,  op2(I, Ri),  0 },
+/*be*/ { "mov",   FALSE, LONG,  op2(I, Ri),  0 },
+/*bf*/ { "mov",   FALSE, LONG,  op2(I, Ri),  0 },
+
+/*c0*/ { "",      TRUE,  BYTE,  op2(Ib, E),  db_Grp2 },
+/*c1*/ { "",      TRUE,  LONG,  op2(Ib, E),  db_Grp2 },
+/*c2*/ { "ret",   FALSE, NONE,  op1(Iw),     0 },
+/*c3*/ { "ret",   FALSE, NONE,  0,           0 },
+/*c4*/ { "les",   TRUE,  LONG,  op2(E, R),   0 },
+/*c5*/ { "lds",   TRUE,  LONG,  op2(E, R),   0 },
+/*c6*/ { "mov",   TRUE,  BYTE,  op2(I, E),   0 },
+/*c7*/ { "mov",   TRUE,  LONG,  op2(I, E),   0 },
+
+/*c8*/ { "enter", FALSE, NONE,  op2(Iw, Ib), 0 },
+/*c9*/ { "leave", FALSE, NONE,  0,           0 },
+/*ca*/ { "lret",  FALSE, NONE,  op1(Iw),     0 },
+/*cb*/ { "lret",  FALSE, NONE,  0,           0 },
+/*cc*/ { "int",   FALSE, NONE,  op1(o3),     0 },
+/*cd*/ { "int",   FALSE, NONE,  op1(Ib),     0 },
+/*ce*/ { "into",  FALSE, NONE,  0,           0 },
+/*cf*/ { "iret",  FALSE, NONE,  0,           0 },
+
+/*d0*/ { "",      TRUE,  BYTE,  op2(o1, E),  db_Grp2 },
+/*d1*/ { "",      TRUE,  LONG,  op2(o1, E),  db_Grp2 },
+/*d2*/ { "",      TRUE,  BYTE,  op2(CL, E),  db_Grp2 },
+/*d3*/ { "",      TRUE,  LONG,  op2(CL, E),  db_Grp2 },
+/*d4*/ { "aam",   FALSE, NONE,  op1(Iba),    0 },
+/*d5*/ { "aad",   FALSE, NONE,  op1(Iba),    0 },
+/*d6*/ { ".byte\t0xd6", FALSE, NONE, 0,      0 },
+/*d7*/ { "xlat",  FALSE, BYTE,  op1(BX),     0 },
+
+/*d8*/  { "",      TRUE,  NONE,  0,          db_Esc8 },
+/*d9*/  { "",      TRUE,  NONE,  0,          db_Esc9 },
+/*da*/  { "",      TRUE,  NONE,  0,          db_Esca },
+/*db*/  { "",      TRUE,  NONE,  0,          db_Escb },
+/*dc*/  { "",      TRUE,  NONE,  0,          db_Escc },
+/*dd*/  { "",      TRUE,  NONE,  0,          db_Escd },
+/*de*/  { "",      TRUE,  NONE,  0,          db_Esce },
+/*df*/  { "",      TRUE,  NONE,  0,          db_Escf },
+
+/*e0*/ { "loopne",FALSE, NONE,  op1(Db),     0 },
+/*e1*/ { "loope", FALSE, NONE,  op1(Db),     0 },
+/*e2*/ { "loop",  FALSE, NONE,  op1(Db),     0 },
+/*e3*/ { "jcxz",  FALSE, SDEP,  op1(Db),     "jecxz" },
+/*e4*/ { "in",    FALSE, BYTE,  op2(Ib, A),  0 },
+/*e5*/ { "in",    FALSE, LONG,  op2(Ib, A) , 0 },
+/*e6*/ { "out",   FALSE, BYTE,  op2(A, Ib),  0 },
+/*e7*/ { "out",   FALSE, LONG,  op2(A, Ib) , 0 },
+
+/*e8*/ { "call",  FALSE, NONE,  op1(Dl),     0 },
+/*e9*/ { "jmp",   FALSE, NONE,  op1(Dl),     0 },
+/*ea*/ { "ljmp",  FALSE, NONE,  op1(OS),     0 },
+/*eb*/ { "jmp",   FALSE, NONE,  op1(Db),     0 },
+/*ec*/ { "in",    FALSE, BYTE,  op2(DX, A),  0 },
+/*ed*/ { "in",    FALSE, LONG,  op2(DX, A) , 0 },
+/*ee*/ { "out",   FALSE, BYTE,  op2(A, DX),  0 },
+/*ef*/ { "out",   FALSE, LONG,  op2(A, DX) , 0 },
+
+/*f0*/ { "",      FALSE, NONE,  0,          0 },
+/*f1*/ { ".byte\t0xf1", FALSE, NONE, 0,     0 },
+/*f2*/ { "",      FALSE, NONE,  0,          0 },
+/*f3*/ { "",      FALSE, NONE,  0,          0 },
+/*f4*/ { "hlt",   FALSE, NONE,  0,          0 },
+/*f5*/ { "cmc",   FALSE, NONE,  0,          0 },
+/*f6*/ { "",      TRUE,  BYTE,  0,          db_Grp3 },
+/*f7*/ { "",      TRUE,  LONG,  0,          db_Grp3 },
+
+/*f8*/ { "clc",   FALSE, NONE,  0,          0 },
+/*f9*/ { "stc",   FALSE, NONE,  0,          0 },
+/*fa*/ { "cli",   FALSE, NONE,  0,          0 },
+/*fb*/ { "sti",   FALSE, NONE,  0,          0 },
+/*fc*/ { "cld",   FALSE, NONE,  0,          0 },
+/*fd*/ { "std",   FALSE, NONE,  0,          0 },
+/*fe*/ { "",      TRUE,  NONE,  0,          db_Grp4 },
+/*ff*/ { "",      TRUE,  NONE,  0,          db_Grp5 },
+};
+
+static const struct inst db_bad_inst =
+       { "???",   FALSE, NONE,  0,           0 };
+
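+/* Extract the mod, reg and r/m fields from a ModRM byte. */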
+#define        f_mod(byte)     ((byte)>>6)
+#define        f_reg(byte)     (((byte)>>3)&0x7)
+#define        f_rm(byte)      ((byte)&0x7)
+
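+/* Extract the scale, index and base fields from a SIB byte. */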
+#define        sib_ss(byte)    ((byte)>>6)
+#define        sib_index(byte) (((byte)>>3)&0x7)
+#define        sib_base(byte)  ((byte)&0x7)
+
+struct i_addr {
+       int             is_reg; /* if reg, reg number is in 'disp' */
+       int             disp;
+       const char *    base;
+       const char *    index;
+       int             ss;
+       int             defss;  /* default stack segment */
+};
+
+static const char * const db_index_reg_16[8] = {
+       "%bx,%si",
+       "%bx,%di",
+       "%bp,%si",
+       "%bp,%di",
+       "%si",
+       "%di",
+       "%bp",
+       "%bx"
+};
+
+static const char * const db_reg[3][8] = {
+       { "%al",  "%cl",  "%dl",  "%bl",  "%ah",  "%ch",  "%dh",  "%bh" },
+       { "%ax",  "%cx",  "%dx",  "%bx",  "%sp",  "%bp",  "%si",  "%di" },
+       { "%eax", "%ecx", "%edx", "%ebx", "%esp", "%ebp", "%esi", "%edi" }
+};
+
+static const char * const db_seg_reg[8] = {
+       "%es", "%cs", "%ss", "%ds", "%fs", "%gs", "", ""
+};
+
+/*
+ * lengths for size attributes
+ */
+static const int db_lengths[] = {
+       1,      /* BYTE */
+       2,      /* WORD */
+       4,      /* LONG */
+       8,      /* QUAD */
+       4,      /* SNGL */
+       8,      /* DBLR */
+       10,     /* EXTR */
+};
+
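+/*
+ * Fetch an operand of 'size' bytes at 'loc' into 'result' and advance
+ * 'loc' past it.
+ */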
+#define        get_value_inc(result, loc, size, is_signed) \
+       result = db_get_value((loc), (size), (is_signed)); \
+       (loc) += (size);
+
+static db_addr_t
+               db_disasm_esc (db_addr_t loc, int inst, int short_addr,
+                                  int size, const char *seg);
+static void    db_print_address (const char *seg, int size,
+                                     struct i_addr *addrp);
+static db_addr_t
+               db_read_address (db_addr_t loc, int short_addr,
+                                    int regmodrm, struct i_addr *addrp);
+
+/*
+ * Read address at location and return updated location.
+ */
+static db_addr_t
+db_read_address(db_addr_t loc, int short_addr, int regmodrm,
+               struct i_addr *addrp)
+{
+       int             mod, rm, sib, index, disp;
+
+       mod = f_mod(regmodrm);
+       rm  = f_rm(regmodrm);
+
+       if (mod == 3) {
+           addrp->is_reg = TRUE;
+           addrp->disp = rm;
+           return (loc);
+       }
+       addrp->is_reg = FALSE;
+       addrp->index = 0;
+       addrp->ss = 0;
+       addrp->defss = 0;
+
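+       /*
+        * 16-bit addressing: %bp-based modes default to the %ss segment;
+        * 32-bit addressing may carry a SIB byte when r/m is 4.
+        */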
+       if (short_addr) {
+           if (mod != 3) {
+               switch(rm) {
+               case 0:
+               case 1:
+                   addrp->index = "%bx";
+                   break;
+               case 2:
+               case 3:
+                   addrp->index = "%bp";
+                   addrp->defss = 1;
+                   break;
+               case 6:
+                   if (mod == 1 || mod == 2)
+                       addrp->defss = 1;
+                   break;
+               }
+           }
+           switch (mod) {
+               case 0:
+                   if (rm == 6) {
+                       get_value_inc(disp, loc, 2, FALSE);
+                       addrp->disp = disp;
+                       addrp->base = 0;
+                   }
+                   else {
+                       addrp->disp = 0;
+                       addrp->base = db_index_reg_16[rm];
+                   }
+                   break;
+               case 1:
+                   get_value_inc(disp, loc, 1, TRUE);
+                   disp &= 0xFFFF;
+                   addrp->disp = disp;
+                   addrp->base = db_index_reg_16[rm];
+                   break;
+               case 2:
+                   get_value_inc(disp, loc, 2, FALSE);
+                   addrp->disp = disp;
+                   addrp->base = db_index_reg_16[rm];
+                   break;
+           }
+       } else {
+           if (mod != 3 && rm == 4) {
+               get_value_inc(sib, loc, 1, FALSE);
+               rm = sib_base(sib);
+               index = sib_index(sib);
+               if (index != 4)
+                   addrp->index = db_reg[LONG][index];
+               addrp->ss = sib_ss(sib);
+           }
+
+           switch (mod) {
+               case 0:
+                   if (rm == 5) {
+                       get_value_inc(addrp->disp, loc, 4, FALSE);
+                       addrp->base = 0;
+                   }
+                   else {
+                       addrp->disp = 0;
+                       addrp->base = db_reg[LONG][rm];
+                   }
+                   break;
+
+               case 1:
+                   get_value_inc(disp, loc, 1, TRUE);
+                   addrp->disp = disp;
+                   addrp->base = db_reg[LONG][rm];
+                   break;
+
+               case 2:
+                   get_value_inc(disp, loc, 4, FALSE);
+                   addrp->disp = disp;
+                   addrp->base = db_reg[LONG][rm];
+                   break;
+           }
+       }
+       return (loc);
+}
+
+static void
+db_print_address(const char *seg, int size, struct i_addr *addrp)
+{
+       if (addrp->is_reg) {
+           db_printf("%s", db_reg[size][addrp->disp]);
+           return;
+       }
+
+       if (seg) {
+           db_printf("%s:", seg);
+       } else if (addrp->defss) {
+           db_printf("%%ss:");
+       }
+
+       db_printsym((db_addr_t)addrp->disp, DB_STGY_ANY);
+       if (addrp->base != 0 || addrp->index != 0) {
+           db_printf("(");
+           if (addrp->base)
+               db_printf("%s", addrp->base);
+           if (addrp->index)
+               db_printf(",%s,%d", addrp->index, 1<<addrp->ss);
+           db_printf(")");
+       }
+}
+
+/*
+ * Disassemble floating-point ("escape") instruction
+ * and return updated location.
+ */
+static db_addr_t
+db_disasm_esc(db_addr_t loc, int inst, int short_addr, int size,
+             const char *seg)
+{
+       int             regmodrm;
+       const struct finst *    fp;
+       int             mod;
+       struct i_addr   address;
+       const char *    name;
+
+       get_value_inc(regmodrm, loc, 1, FALSE);
+       fp = &db_Esc_inst[inst - 0xd8][f_reg(regmodrm)];
+       mod = f_mod(regmodrm);
+       if (mod != 3) {
+           if (*fp->f_name == '\0') {
+               db_printf("<bad instruction>");
+               return (loc);
+           }
+           /*
+            * Normal address modes.
+            */
+           loc = db_read_address(loc, short_addr, regmodrm, &address);
+           db_printf("%s", fp->f_name);
+           switch(fp->f_size) {
+               case SNGL:
+                   db_printf("s");
+                   break;
+               case DBLR:
+                   db_printf("l");
+                   break;
+               case EXTR:
+                   db_printf("t");
+                   break;
+               case WORD:
+                   db_printf("s");
+                   break;
+               case LONG:
+                   db_printf("l");
+                   break;
+               case QUAD:
+                   db_printf("q");
+                   break;
+               default:
+                   break;
+           }
+           db_printf("\t");
+           db_print_address(seg, BYTE, &address);
+       }
+       else {
+           /*
+            * 'reg-reg' - special formats
+            */
+           switch (fp->f_rrmode) {
+               case op2(ST,STI):
+                   name = (fp->f_rrname) ? fp->f_rrname : fp->f_name;
+                   db_printf("%s\t%%st,%%st(%d)",name,f_rm(regmodrm));
+                   break;
+               case op2(STI,ST):
+                   name = (fp->f_rrname) ? fp->f_rrname : fp->f_name;
+                   db_printf("%s\t%%st(%d),%%st",name, f_rm(regmodrm));
+                   break;
+               case op1(STI):
+                   name = (fp->f_rrname) ? fp->f_rrname : fp->f_name;
+                   db_printf("%s\t%%st(%d)",name, f_rm(regmodrm));
+                   break;
+               case op1(X):
+                   name = ((const char * const *)fp->f_rrname)[f_rm(regmodrm)];
+                   if (*name == '\0')
+                       goto bad;
+                   db_printf("%s", name);
+                   break;
+               case op1(XA):
+                   name = ((const char * const *)fp->f_rrname)[f_rm(regmodrm)];
+                   if (*name == '\0')
+                       goto bad;
+                   db_printf("%s\t%%ax", name);
+                   break;
+               default:
+               bad:
+                   db_printf("<bad instruction>");
+                   break;
+           }
+       }
+
+       return (loc);
+}
+
+/*
+ * Disassemble instruction at 'loc'.  'altfmt' specifies an
+ * (optional) alternate format.  Return the address of the
+ * start of the next instruction.
+ *
+ * If regs is non-null it may be used to obtain context, such as
+ * whether we are in word or long mode.
+ */
+db_addr_t
+db_disasm(db_addr_t loc, boolean_t altfmt, db_regs_t *regs)
+{
+       int     inst;
+       int     size;
+       int     short_addr;
+       const char *    seg;
+       const struct inst *     ip;
+       const char *    i_name;
+       int     i_size;
+       int     i_mode;
+       int     regmodrm = 0;
+       boolean_t       first;
+       int     displ;
+       int     prefix;
+       int     imm;
+       int     imm2;
+       int     len;
+       struct i_addr   address;
+
+       get_value_inc(inst, loc, 1, FALSE);
+       seg = 0;
+
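+       /*
+        * Default operand/address size comes from the code segment
+        * descriptor: 16-bit when sd_def32 is clear, otherwise 32-bit.
+        */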
+#ifdef _GDT_ARRAY_PRESENT
+       if (regs && gdt[mycpu->gd_cpuid * NGDT + IDXSEL(regs->tf_cs & 0xFFFF)].sd.sd_def32 == 0) {
+               size = WORD;
+               short_addr = TRUE;
+       } else
+#endif
+       {
+               size = LONG;
+               short_addr = FALSE;
+       }
+
+       /*
+        * Get prefixes
+        */
+       prefix = TRUE;
+       do {
+           switch (inst) {
+               case 0x66:              /* data16 */
+                   size = WORD;
+                   break;
+               case 0x67:
+                   short_addr = TRUE;
+                   break;
+               case 0x26:
+                   seg = "%es";
+                   break;
+               case 0x36:
+                   seg = "%ss";
+                   break;
+               case 0x2e:
+                   seg = "%cs";
+                   break;
+               case 0x3e:
+                   seg = "%ds";
+                   break;
+               case 0x64:
+                   seg = "%fs";
+                   break;
+               case 0x65:
+                   seg = "%gs";
+                   break;
+               case 0xf0:
+                   db_printf("lock ");
+                   break;
+               case 0xf2:
+                   db_printf("repne ");
+                   break;
+               case 0xf3:
+                   db_printf("repe "); /* XXX repe VS rep */
+                   break;
+               default:
+                   prefix = FALSE;
+                   break;
+           }
+           if (prefix) {
+               get_value_inc(inst, loc, 1, FALSE);
+           }
+       } while (prefix);
+
+       if (inst >= 0xd8 && inst <= 0xdf) {
+           loc = db_disasm_esc(loc, inst, short_addr, size, seg);
+           db_printf("\n");
+           return (loc);
+       }
+
+       if (inst == 0x0f) {
+           get_value_inc(inst, loc, 1, FALSE);
+           ip = db_inst_0f[inst>>4];
+           if (ip == 0) {
+               ip = &db_bad_inst;
+           }
+           else {
+               ip = &ip[inst&0xf];
+           }
+       }
+       else
+           ip = &db_inst_table[inst];
+
+       if (ip->i_has_modrm) {
+           get_value_inc(regmodrm, loc, 1, FALSE);
+           loc = db_read_address(loc, short_addr, regmodrm, &address);
+       }
+
+       i_name = ip->i_name;
+       i_size = ip->i_size;
+       i_mode = ip->i_mode;
+
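+       /*
+        * For group opcodes the reg field of the ModRM byte selects the
+        * actual instruction (and, for Grp3-Grp5, its operand format).
+        */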
+       if (ip->i_extra == db_Grp1 || ip->i_extra == db_Grp2 ||
+           ip->i_extra == db_Grp6 || ip->i_extra == db_Grp7 ||
+           ip->i_extra == db_Grp8 || ip->i_extra == db_Grp9) {
+           i_name = ((const char * const *)ip->i_extra)[f_reg(regmodrm)];
+       }
+       else if (ip->i_extra == db_Grp3) {
+           ip = ip->i_extra;
+           ip = &ip[f_reg(regmodrm)];
+           i_name = ip->i_name;
+           i_mode = ip->i_mode;
+       }
+       else if (ip->i_extra == db_Grp4 || ip->i_extra == db_Grp5) {
+           ip = ip->i_extra;
+           ip = &ip[f_reg(regmodrm)];
+           i_name = ip->i_name;
+           i_mode = ip->i_mode;
+           i_size = ip->i_size;
+       }
+
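+       /*
+        * SDEP mnemonics depend on the operand size: i_name holds the
+        * 16-bit form and i_extra the 32-bit form (e.g. cbw/cwde).
+        */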
+       if (i_size == SDEP) {
+           if (size == WORD)
+               db_printf("%s", i_name);
+           else
+               db_printf("%s", (const char *)ip->i_extra);
+       }
+       else {
+           db_printf("%s", i_name);
+           if (i_size != NONE) {
+               if (i_size == BYTE) {
+                   db_printf("b");
+                   size = BYTE;
+               }
+               else if (i_size == WORD) {
+                   db_printf("w");
+                   size = WORD;
+               }
+               else if (size == WORD)
+                   db_printf("w");
+               else
+                   db_printf("l");
+           }
+       }
+       db_printf("\t");
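+       /*
+        * Print the operands: i_mode packs one descriptor per byte,
+        * low byte first.
+        */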
+       for (first = TRUE;
+            i_mode != 0;
+            i_mode >>= 8, first = FALSE)
+       {
+           if (!first)
+               db_printf(",");
+
+           switch (i_mode & 0xFF) {
+
+               case E:
+                   db_print_address(seg, size, &address);
+                   break;
+
+               case Eind:
+                   db_printf("*");
+                   db_print_address(seg, size, &address);
+                   break;
+
+               case El:
+                   db_print_address(seg, LONG, &address);
+                   break;
+
+               case Ew:
+                   db_print_address(seg, WORD, &address);
+                   break;
+
+               case Eb:
+                   db_print_address(seg, BYTE, &address);
+                   break;
+
+               case R:
+                   db_printf("%s", db_reg[size][f_reg(regmodrm)]);
+                   break;
+
+               case Rw:
+                   db_printf("%s", db_reg[WORD][f_reg(regmodrm)]);
+                   break;
+
+               case Ri:
+                   db_printf("%s", db_reg[size][f_rm(inst)]);
+                   break;
+
+               case Ril:
+                   db_printf("%s", db_reg[LONG][f_rm(inst)]);
+                   break;
+
+               case S:
+                   db_printf("%s", db_seg_reg[f_reg(regmodrm)]);
+                   break;
+
+               case Si:
+                   db_printf("%s", db_seg_reg[f_reg(inst)]);
+                   break;
+
+               case A:
+                   db_printf("%s", db_reg[size][0]);   /* acc */
+                   break;
+
+               case BX:
+                   if (seg)
+                       db_printf("%s:", seg);
+                   db_printf("(%s)", short_addr ? "%bx" : "%ebx");
+                   break;
+
+               case CL:
+                   db_printf("%%cl");
+                   break;
+
+               case DX:
+                   db_printf("%%dx");
+                   break;
+
+               case SI:
+                   if (seg)
+                       db_printf("%s:", seg);
+                   db_printf("(%s)", short_addr ? "%si" : "%esi");
+                   break;
+
+               case DI:
+                   db_printf("%%es:(%s)", short_addr ? "%di" : "%edi");
+                   break;
+
+               case CR:
+                   db_printf("%%cr%d", f_reg(regmodrm));
+                   break;
+
+               case DR:
+                   db_printf("%%dr%d", f_reg(regmodrm));
+                   break;
+
+               case TR:
+                   db_printf("%%tr%d", f_reg(regmodrm));
+                   break;
+
+               case I:
+                   len = db_lengths[size];
+                   get_value_inc(imm, loc, len, FALSE);
+                   db_printf("$%#r", imm);
+                   break;
+
+               case Is:
+                   len = db_lengths[size];
+                   get_value_inc(imm, loc, len, FALSE);
+                   db_printf("$%+#r", imm);
+                   break;
+
+               case Ib:
+                   get_value_inc(imm, loc, 1, FALSE);
+                   db_printf("$%#r", imm);
+                   break;
+
+               case Iba:
+                   get_value_inc(imm, loc, 1, FALSE);
+                   if (imm != 0x0a)
+                       db_printf("$%#r", imm);
+                   break;
+
+               case Ibs:
+                   get_value_inc(imm, loc, 1, TRUE);
+                   if (size == WORD)
+                       imm &= 0xFFFF;
+                   db_printf("$%+#r", imm);
+                   break;
+
+               case Iw:
+                   get_value_inc(imm, loc, 2, FALSE);
+                   db_printf("$%#r", imm);
+                   break;
+
+               case O:
+                   len = (short_addr ? 2 : 4);
+                   get_value_inc(displ, loc, len, FALSE);
+                   if (seg)
+                       db_printf("%s:%+#r",seg, displ);
+                   else
+                       db_printsym((db_addr_t)displ, DB_STGY_ANY);
+                   break;
+
+               case Db:
+                   get_value_inc(displ, loc, 1, TRUE);
+                   displ += loc;
+                   if (size == WORD)
+                       displ &= 0xFFFF;
+                   db_printsym((db_addr_t)displ, DB_STGY_XTRN);
+                   break;
+
+               case Dl:
+                   len = db_lengths[size];
+                   get_value_inc(displ, loc, len, FALSE);
+                   displ += loc;
+                   if (size == WORD)
+                       displ &= 0xFFFF;
+                   db_printsym((db_addr_t)displ, DB_STGY_XTRN);
+                   break;
+
+               case o1:
+                   db_printf("$1");
+                   break;
+
+               case o3:
+                   db_printf("$3");
+                   break;
+
+               case OS:
+                   len = db_lengths[size];
+                   get_value_inc(imm, loc, len, FALSE);        /* offset */
+                   get_value_inc(imm2, loc, 2, FALSE); /* segment */
+                   db_printf("$%#r,%#r", imm2, imm);
+                   break;
+           }
+       }
+       db_printf("\n");
+       return (loc);
+}
diff --git a/sys/platform/pc64/amd64/db_interface.c b/sys/platform/pc64/amd64/db_interface.c
new file mode 100644 (file)
index 0000000..ad81145
--- /dev/null
@@ -0,0 +1,329 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
+ *  School of Computer Science
+ *  Carnegie Mellon University
+ *  Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ *
+ * $FreeBSD: src/sys/i386/i386/db_interface.c,v 1.48.2.1 2000/07/07 00:38:46 obrien Exp $
+ * $DragonFly: src/sys/platform/pc64/amd64/db_interface.c,v 1.1 2007/09/23 04:29:31 yanyh Exp $
+ */
+
+/*
+ * Interface to new debugger.
+ */
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/reboot.h>
+#include <sys/cons.h>
+
+#include <machine/cpu.h>
+#include <machine/smp.h>
+#include <machine/globaldata.h>
+#include <machine/md_var.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <ddb/ddb.h>
+
+#include <setjmp.h>
+
+static jmp_buf *db_nofault = 0;
+extern jmp_buf db_jmpbuf;
+
+int    db_active;
+db_regs_t ddb_regs;
+
+static jmp_buf db_global_jmpbuf;
+static int     db_global_jmpbuf_valid;
+
+#ifdef __GNUC__
+#define        rss() ({u_short ss; __asm __volatile("mov %%ss,%0" : "=r" (ss)); ss;})
+#endif
+
+/*
+ *  kdb_trap - field a TRACE or BPT trap
+ */
+int
+kdb_trap(int type, int code, struct amd64_saved_state *regs)
+{
+       volatile int ddb_mode = !(boothowto & RB_GDB);
+
+       /*
+        * XXX try to do nothing if the console is in graphics mode.
+        * Handle trace traps (and hardware breakpoints...) by ignoring
+        * them except for forgetting about them.  Return 0 for other
+        * traps to say that we haven't done anything.  The trap handler
+        * will usually panic.  We should handle breakpoint traps for
+        * our breakpoints by disarming our breakpoints and fixing up
+        * %eip.
+        */
+       if (cons_unavail && ddb_mode) {
+           if (type == T_TRCTRAP) {
+               regs->tf_rflags &= ~PSL_T;
+               return (1);
+           }
+           return (0);
+       }
+
+       switch (type) {
+           case T_BPTFLT:      /* breakpoint */
+           case T_TRCTRAP:     /* debug exception */
+               break;
+
+           default:
+               /*
+                * XXX this is almost useless now.  In most cases,
+                * trap_fatal() has already printed a much more verbose
+                * message.  However, it is dangerous to print things in
+                * trap_fatal() - kprintf() might be reentered and trap.
+                * The debugger should be given control first.
+                */
+               if (ddb_mode)
+                   db_printf("kernel: type %d trap, code=%x\n", type, code);
+
+               if (db_nofault) {
+                   jmp_buf *no_fault = db_nofault;
+                   db_nofault = 0;
+                   longjmp(*no_fault, 1);
+               }
+       }
+
+       /*
+        * This handles unexpected traps in ddb commands, including calls to
+        * non-ddb functions.  db_nofault only applies to memory accesses by
+        * internal ddb commands.
+        */
+       if (db_global_jmpbuf_valid)
+           longjmp(db_global_jmpbuf, 1);
+
+       /*
+        * XXX We really should switch to a local stack here.
+        */
+       ddb_regs = *regs;
+
+       /*
+        * If in kernel mode, rsp and ss are not saved, so dummy them up.
+        */
+       if (ISPL(regs->tf_cs) == 0) {
+           ddb_regs.tf_rsp = (register_t)&regs->tf_rsp;
+           ddb_regs.tf_ss = rss();
+       }
+
+#ifdef SMP
+       db_printf("\nCPU%d stopping CPUs: 0x%08x\n", 
+           mycpu->gd_cpuid, mycpu->gd_other_cpus);
+
+       /* We stop all CPUs except ourselves (obviously) */
+       stop_cpus(mycpu->gd_other_cpus);
+
+       db_printf(" stopped\n");
+#endif /* SMP */
+
+       setjmp(db_global_jmpbuf);
+       db_global_jmpbuf_valid = TRUE;
+       db_active++;
+       /* vcons_set_mode(1); */
+       if (ddb_mode) {
+           cndbctl(TRUE);
+           db_trap(type, code);
+           cndbctl(FALSE);
+       } else {
+           /* gdb_handle_exception(&ddb_regs, type, code); */
+       }
+       db_active--;
+       /* vcons_set_mode(0); */
+       db_global_jmpbuf_valid = FALSE;
+
+#ifdef SMP
+       db_printf("\nCPU%d restarting CPUs: 0x%08x\n",
+           mycpu->gd_cpuid, stopped_cpus);
+
+       /* Restart all the CPUs we previously stopped */
+       if (stopped_cpus != mycpu->gd_other_cpus) {
+               db_printf("whoa, other_cpus: 0x%08x, stopped_cpus: 0x%08x\n",
+                         mycpu->gd_other_cpus, stopped_cpus);
+               panic("stop_cpus() failed");
+       }
+       restart_cpus(stopped_cpus);
+
+       db_printf(" restarted\n");
+#endif /* SMP */
+
+       regs->tf_rip    = ddb_regs.tf_rip;
+       regs->tf_rflags = ddb_regs.tf_rflags;
+       regs->tf_rax    = ddb_regs.tf_rax;
+       regs->tf_rcx    = ddb_regs.tf_rcx;
+       regs->tf_rdx    = ddb_regs.tf_rdx;
+       regs->tf_rbx    = ddb_regs.tf_rbx;
+
+       /*
+        * If in user mode, the saved RSP and SS were valid, restore them.
+        */
+       if (ISPL(regs->tf_cs)) {
+           regs->tf_rsp = ddb_regs.tf_rsp;
+           regs->tf_ss  = ddb_regs.tf_ss & 0xffff;
+       }
+
+       regs->tf_rbp    = ddb_regs.tf_rbp;
+       regs->tf_rsi    = ddb_regs.tf_rsi;
+       regs->tf_rdi    = ddb_regs.tf_rdi;
+       /* regs->tf_es     = ddb_regs.tf_es & 0xffff; */
+       /* regs->tf_fs     = ddb_regs.tf_fs & 0xffff; */
+       /* regs->tf_gs     = ddb_regs.tf_gs & 0xffff; */
+       regs->tf_cs     = ddb_regs.tf_cs & 0xffff;
+       /* regs->tf_ds     = ddb_regs.tf_ds & 0xffff; */
+       return (1);
+}
+
+/*
+ * Read bytes from kernel address space for debugger.
+ */
+void
+db_read_bytes(vm_offset_t addr, size_t size, char *data)
+{
+       char    *src;
+
+       db_nofault = &db_jmpbuf;
+
+       src = (char *)addr;
+       while (size-- > 0)
+           *data++ = *src++;
+
+       db_nofault = 0;
+}
+
+/*
+ * Write bytes to kernel address space for debugger.
+ */
+void
+db_write_bytes(vm_offset_t addr, size_t size, char *data)
+{
+       char    *dst;
+#if 0
+       vpte_t  *ptep0 = NULL;
+       vpte_t  oldmap0 = 0;
+       vm_offset_t     addr1;
+       vpte_t  *ptep1 = NULL;
+       vpte_t  oldmap1 = 0;
+#endif
+
+       db_nofault = &db_jmpbuf;
+#if 0
+       if (addr > trunc_page((vm_offset_t)btext) - size &&
+           addr < round_page((vm_offset_t)etext)) {
+
+           ptep0 = pmap_kpte(addr);
+           oldmap0 = *ptep0;
+           *ptep0 |= VPTE_W;
+
+           /* Map another page if the data crosses a page boundary. */
+           if ((*ptep0 & PG_PS) == 0) {
+               addr1 = trunc_page(addr + size - 1);
+               if (trunc_page(addr) != addr1) {
+                   ptep1 = pmap_kpte(addr1);
+                   oldmap1 = *ptep1;
+                   *ptep1 |= VPTE_W;
+               }
+           } else {
+               addr1 = trunc_4mpage(addr + size - 1);
+               if (trunc_4mpage(addr) != addr1) {
+                   ptep1 = pmap_kpte(addr1);
+                   oldmap1 = *ptep1;
+                   *ptep1 |= VPTE_W;
+               }
+           }
+
+           cpu_invltlb();
+       }
+#endif
+
+       dst = (char *)addr;
+
+       while (size-- > 0)
+           *dst++ = *data++;
+
+       db_nofault = 0;
+
+#if 0
+       if (ptep0) {
+           *ptep0 = oldmap0;
+
+           if (ptep1)
+               *ptep1 = oldmap1;
+
+           cpu_invltlb();
+       }
+#endif
+}
+
+/*
+ * The debugger sometimes needs to know the actual KVM address represented
+ * by the instruction pointer, stack pointer, or base pointer.  Normally
+ * the actual KVM address is simply the contents of the register.  However,
+ * if the debugger is entered from the BIOS or VM86 we need to figure out
+ * the offset from the segment register.
+ */
+db_addr_t
+PC_REGS(db_regs_t *regs)
+{
+    return(regs->tf_rip);
+}
+
+db_addr_t
+SP_REGS(db_regs_t *regs)
+{
+    return(regs->tf_rsp);
+}
+
+db_addr_t
+BP_REGS(db_regs_t *regs)
+{
+    return(regs->tf_rbp);
+}
+
+/*
+ * XXX
+ * Move this to machdep.c and allow it to be called if any debugger is
+ * installed.
+ */
+void
+Debugger(const char *msg)
+{
+       static volatile u_char in_Debugger;
+
+       /*
+        * XXX
+        * Do nothing if the console is in graphics mode.  This is
+        * OK if the call is for the debugger hotkey but not if the call
+        * is a weak form of panicing.
+        */
+       if (cons_unavail && !(boothowto & RB_GDB))
+           return;
+
+       if (!in_Debugger) {
+           in_Debugger = 1;
+           db_printf("Debugger(\"%s\")\n", msg);
+           breakpoint();
+           in_Debugger = 0;
+       }
+}
diff --git a/sys/platform/pc64/amd64/db_trace.c b/sys/platform/pc64/amd64/db_trace.c
new file mode 100644 (file)
index 0000000..8723318
--- /dev/null
@@ -0,0 +1,624 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
+ *  School of Computer Science
+ *  Carnegie Mellon University
+ *  Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ *
+ * $FreeBSD: src/sys/i386/i386/db_trace.c,v 1.35.2.3 2002/02/21 22:31:25 silby Exp $
+ * $DragonFly: src/sys/platform/pc64/amd64/db_trace.c,v 1.1 2007/09/23 04:29:31 yanyh Exp $
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/linker_set.h>
+#include <sys/lock.h>
+#include <sys/proc.h>
+#include <sys/reg.h>
+
+#include <machine/cpu.h>
+#include <machine/md_var.h>
+
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/pmap.h>
+#include <vm/vm_map.h>
+#include <ddb/ddb.h>
+#include <dlfcn.h>     /* DLL */
+
+#include <sys/user.h>
+
+#include <ddb/db_access.h>
+#include <ddb/db_sym.h>
+#include <ddb/db_variables.h>
+
+db_varfcn_t db_dr0;
+db_varfcn_t db_dr1;
+db_varfcn_t db_dr2;
+db_varfcn_t db_dr3;
+db_varfcn_t db_dr4;
+db_varfcn_t db_dr5;
+db_varfcn_t db_dr6;
+db_varfcn_t db_dr7;
+
+/*
+ * Machine register set.
+ */
+struct db_variable db_regs[] = {
+       { "cs",         &ddb_regs.tf_cs,     FCN_NULL },
+/*     { "ds",         &ddb_regs.tf_ds,     FCN_NULL },
+       { "es",         &ddb_regs.tf_es,     FCN_NULL },
+       { "fs",         &ddb_regs.tf_fs,     FCN_NULL },
+       { "gs",         &ddb_regs.tf_gs,     FCN_NULL }, */
+       { "ss",         &ddb_regs.tf_ss,     FCN_NULL },
+       { "rax",        &ddb_regs.tf_rax,    FCN_NULL },
+       { "rcx",        &ddb_regs.tf_rcx,    FCN_NULL },
+       { "rdx",        &ddb_regs.tf_rdx,    FCN_NULL },
+       { "rbx",        &ddb_regs.tf_rbx,    FCN_NULL },
+       { "rsp",        &ddb_regs.tf_rsp,    FCN_NULL },
+       { "rbp",        &ddb_regs.tf_rbp,    FCN_NULL },
+       { "rsi",        &ddb_regs.tf_rsi,    FCN_NULL },
+       { "rdi",        &ddb_regs.tf_rdi,    FCN_NULL },
+       { "rip",        &ddb_regs.tf_rip,    FCN_NULL },
+       { "rfl",        &ddb_regs.tf_rflags, FCN_NULL },
+       { "dr0",        NULL,                db_dr0 },
+       { "dr1",        NULL,                db_dr1 },
+       { "dr2",        NULL,                db_dr2 },
+       { "dr3",        NULL,                db_dr3 },
+       { "dr4",        NULL,                db_dr4 },
+       { "dr5",        NULL,                db_dr5 },
+       { "dr6",        NULL,                db_dr6 },
+       { "dr7",        NULL,                db_dr7 },
+};
+struct db_variable *db_eregs = db_regs + sizeof(db_regs)/sizeof(db_regs[0]);
+
+/*
+ * Stack trace.
+ */
+#define        INKERNEL(va)    (((vm_offset_t)(va)) >= USRSTACK)
+
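+/*
+ * Conventional stack frame layout as inherited from the i386 unwinder:
+ * saved caller frame pointer, return address, then the first argument slot.
+ */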
+struct amd64_frame {
+       struct amd64_frame      *f_frame;
+       long                    f_retaddr;
+       long                    f_arg0;
+};
+
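+/* Frame classifications used by db_nextframe() to unwind past trap frames. */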
+#define NORMAL         0
+#define        TRAP            1
+#define        INTERRUPT       2
+#define        SYSCALL         3
+
+static void    db_nextframe(struct amd64_frame **, db_addr_t *);
+static int     db_numargs(struct amd64_frame *);
+static void    db_print_stack_entry(const char *, int, char **, int *, db_addr_t);
+static void    dl_symbol_values(int callpc, const char **name);
+
+
+static char    *watchtype_str(int type);
+static int     kamd64_set_watch(int watchnum, unsigned int watchaddr, 
+                               int size, int access, struct dbreg * d);
+static int     kamd64_clr_watch(int watchnum, struct dbreg * d);
+int            db_md_set_watchpoint(db_expr_t addr, db_expr_t size);
+int            db_md_clr_watchpoint(db_expr_t addr, db_expr_t size);
+void           db_md_list_watchpoints(void);
+
+
+/*
+ * Figure out how many arguments were passed into the frame at "fp".
+ */
+static int
+db_numargs(struct amd64_frame *fp)
+{
+       int     args;
+#if 0
+       int     *argp;
+       int     inst;
+
+       argp = (int *)db_get_value((int)&fp->f_retaddr, 4, FALSE);
+       /*
+        * XXX etext is wrong for LKMs.  We should attempt to interpret
+        * the instruction at the return address in all cases.  This
+        * may require better fault handling.
+        */
+       if (argp < (int *)btext || argp >= (int *)etext) {
+               args = 5;
+       } else {
+               inst = db_get_value((int)argp, 4, FALSE);
+               if ((inst & 0xff) == 0x59)      /* popl %ecx */
+                       args = 1;
+               else if ((inst & 0xffff) == 0xc483)     /* addl $Ibs, %esp */
+                       args = ((inst >> 16) & 0xff) / 4;
+               else
+                       args = 5;
+       }
+#endif
+       args = 5;
+       return(args);
+}
+
+static void
+db_print_stack_entry(const char *name, int narg, char **argnp, int *argp,
+                    db_addr_t callpc)
+{
+       db_printf("%s(", name);
+       while (narg) {
+               if (argnp)
+                       db_printf("%s=", *argnp++);
+               db_printf("%r", db_get_value((int)argp, 4, FALSE));
+               argp++;
+               if (--narg != 0)
+                       db_printf(",");
+       }
+       db_printf(") at ");
+       db_printsym(callpc, DB_STGY_PROC);
+       db_printf("\n");
+}
+
+/*
+ * Figure out the next frame up in the call stack.
+ */
+static void
+db_nextframe(struct amd64_frame **fp, db_addr_t *ip)
+{
+       struct trapframe *tf;
+       int frame_type;
+       int eip, esp, ebp;
+       db_expr_t offset;
+       const char *sym, *name;
+
+       eip = db_get_value((int) &(*fp)->f_retaddr, 4, FALSE);
+       ebp = db_get_value((int) &(*fp)->f_frame, 4, FALSE);
+
+       /*
+        * Figure out frame type.
+        */
+
+       frame_type = NORMAL;
+
+       sym = db_search_symbol(eip, DB_STGY_ANY, &offset);
+       db_symbol_values(sym, &name, NULL);
+       dl_symbol_values(eip, &name);
+       if (name != NULL) {
+               if (!strcmp(name, "calltrap")) {
+                       frame_type = TRAP;
+               } else if (!strncmp(name, "Xresume", 7)) {
+                       frame_type = INTERRUPT;
+               } else if (!strcmp(name, "_Xsyscall")) {
+                       frame_type = SYSCALL;
+               }
+       }
+
+       /*
+        * Normal frames need no special processing.
+        */
+       if (frame_type == NORMAL) {
+               *ip = (db_addr_t) eip;
+               *fp = (struct amd64_frame *) ebp;
+               return;
+       }
+
+       db_print_stack_entry(name, 0, 0, 0, eip);
+
+       /*
+        * Point to base of trapframe which is just above the
+        * current frame.
+        */
+       tf = (struct trapframe *) ((int)*fp + 8);
+
+#if 0
+       esp = (ISPL(tf->tf_cs) == SEL_UPL) ?  tf->tf_rsp : (int)&tf->tf_rsp;
+#endif
+       esp = (int)&tf->tf_rsp;
+
+       switch (frame_type) {
+       case TRAP:
+               {
+                       eip = tf->tf_rip;
+                       ebp = tf->tf_rbp;
+                       db_printf(
+                   "--- trap %#r, eip = %#r, esp = %#r, ebp = %#r ---\n",
+                           tf->tf_trapno, eip, esp, ebp);
+               }
+               break;
+       case SYSCALL:
+               {
+                       eip = tf->tf_rip;
+                       ebp = tf->tf_rbp;
+                       db_printf(
+                   "--- syscall %#r, eip = %#r, esp = %#r, ebp = %#r ---\n",
+                           tf->tf_rax, eip, esp, ebp);
+               }
+               break;
+       case INTERRUPT:
+               tf = (struct trapframe *)((int)*fp + 16);
+               {
+                       eip = tf->tf_rip;
+                       ebp = tf->tf_rbp;
+                       db_printf(
+                   "--- interrupt, eip = %#r, esp = %#r, ebp = %#r ---\n",
+                           eip, esp, ebp);
+               }
+               break;
+       default:
+               break;
+       }
+
+       *ip = (db_addr_t) eip;
+       *fp = (struct amd64_frame *) ebp;
+}
+
+void
+db_stack_trace_cmd(db_expr_t addr, boolean_t have_addr, db_expr_t count,
+                  char *modif)
+{
+       struct amd64_frame *frame;
+       int *argp;
+       db_addr_t callpc;
+       boolean_t first;
+       int i;
+
+       if (count == -1)
+               count = 1024;
+
+       if (!have_addr) {
+               frame = (struct amd64_frame *)BP_REGS(&ddb_regs);
+               if (frame == NULL)
+                       frame = (struct amd64_frame *)(SP_REGS(&ddb_regs) - 4);
+               callpc = PC_REGS(&ddb_regs);
+       } else {
+               /*
+                * Look for something that might be a frame pointer, just as
+                * a convenience.
+                */
+               frame = (struct amd64_frame *)addr;
+               for (i = 0; i < 4096; i += 4) {
+                       struct amd64_frame *check;
+
+                       check = (struct amd64_frame *)db_get_value((int)((char *)&frame->f_frame + i), 4, FALSE);
+                       if ((char *)check - (char *)frame >= 0 &&
+                           (char *)check - (char *)frame < 4096
+                       ) {
+                               break;
+                       }
+                       db_printf("%p does not look like a stack frame, skipping\n", (char *)&frame->f_frame + i);
+               }
+               if (i == 4096) {
+                       db_printf("Unable to find anything that looks like a stack frame\n");
+                       return;
+               }
+               frame = (void *)((char *)frame + i);
+               db_printf("Trace beginning at frame %p\n", frame);
+               callpc = (db_addr_t)db_get_value((int)&frame->f_retaddr, 4, FALSE);
+       }
+
+       first = TRUE;
+       while (count--) {
+               struct amd64_frame *actframe;
+               int             narg;
+               const char *    name;
+               db_expr_t       offset;
+               c_db_sym_t      sym;
+#define MAXNARG        16
+               char    *argnames[MAXNARG], **argnp = NULL;
+
+               sym = db_search_symbol(callpc, DB_STGY_ANY, &offset);
+               db_symbol_values(sym, &name, NULL);
+               dl_symbol_values(callpc, &name);
+
+               /*
+                * Attempt to determine a (possibly fake) frame that gives
+                * the caller's pc.  It may differ from `frame' if the
+                * current function never sets up a standard frame or hasn't
+                * set one up yet or has just discarded one.  The last two
+                * cases can be guessed fairly reliably for code generated
+                * by gcc.  The first case is too much trouble to handle in
+                * general because the amount of junk on the stack depends
+                * on the pc (the special handling of "calltrap", etc. in
+                * db_nextframe() works because the `next' pc is special).
+                */
+               actframe = frame;
+               if (first) {
+                       if (!have_addr) {
+                               int instr;
+
+                               instr = db_get_value(callpc, 4, FALSE);
+                               if ((instr & 0x00ffffff) == 0x00e58955) {
+                                       /* pushl %ebp; movl %esp, %ebp */
+                                       actframe = (struct amd64_frame *)
+                                           (SP_REGS(&ddb_regs) - 4);
+                               } else if ((instr & 0x0000ffff) == 0x0000e589) {
+                                       /* movl %esp, %ebp */
+                                       actframe = (struct amd64_frame *)
+                                           SP_REGS(&ddb_regs);
+                                       if (ddb_regs.tf_rbp == 0) {
+                                               /* Fake caller's frame better. */
+                                               frame = actframe;
+                                       }
+                               } else if ((instr & 0x000000ff) == 0x000000c3) {
+                                       /* ret */
+                                       actframe = (struct amd64_frame *)
+                                           (SP_REGS(&ddb_regs) - 4);
+                               } else if (offset == 0) {
+                                       /* Probably a symbol in assembler code. */
+                                       actframe = (struct amd64_frame *)
+                                           (SP_REGS(&ddb_regs) - 4);
+                               }
+                       } else if (name != NULL &&
+                                  strcmp(name, "fork_trampoline") == 0) {
+                               /*
+                                * Don't try to walk back on a stack for a
+                                * process that hasn't actually been run yet.
+                                */
+                               db_print_stack_entry(name, 0, 0, 0, callpc);
+                               break;
+                       }
+                       first = FALSE;
+               }
+
+               argp = &actframe->f_arg0;
+               narg = MAXNARG;
+               if (sym != NULL && db_sym_numargs(sym, &narg, argnames)) {
+                       argnp = argnames;
+               } else {
+                       narg = db_numargs(frame);
+               }
+
+               db_print_stack_entry(name, narg, argnp, argp, callpc);
+
+               if (actframe != frame) {
+                       /* `frame' belongs to caller. */
+                       callpc = (db_addr_t)
+                           db_get_value((int)&actframe->f_retaddr, 4, FALSE);
+                       continue;
+               }
+
+               db_nextframe(&frame, &callpc);
+               if (frame == 0)
+                       break;
+       }
+}
+
+void
+db_print_backtrace(void)
+{
+       register_t  ebp = 0;    /* XXX placeholder; fetch %rbp once the amd64 asm below is filled in */
+
+       /* __asm __volatile("movl %%ebp, %0" : "=r" (ebp)); */
+       db_stack_trace_cmd(ebp, 1, -1, NULL);
+}
+
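+/*
+ * Generate the ddb get/set accessors for the hardware debug registers;
+ * each expands to a db_varfcn_t built on rdrN()/load_drN().
+ */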
+#define DB_DRX_FUNC(reg)                                               \
+int                                                                    \
+db_ ## reg (struct db_variable *vp, db_expr_t *valuep, int op)         \
+{                                                                      \
+       if (op == DB_VAR_GET)                                           \
+               *valuep = r ## reg ();                                  \
+       else                                                            \
+               load_ ## reg (*valuep);                                 \
+                                                                       \
+       return(0);                                                      \
+} 
+
+DB_DRX_FUNC(dr0)
+DB_DRX_FUNC(dr1)
+DB_DRX_FUNC(dr2)
+DB_DRX_FUNC(dr3)
+DB_DRX_FUNC(dr4)
+DB_DRX_FUNC(dr5)
+DB_DRX_FUNC(dr6)
+DB_DRX_FUNC(dr7)
+
+static int
+kamd64_set_watch(int watchnum, unsigned int watchaddr, int size, int access,
+              struct dbreg *d)
+{
+       int i;
+       unsigned int mask;
+       
+       if (watchnum == -1) {
+               for (i = 0, mask = 0x3; i < 4; i++, mask <<= 2)
+                       if ((d->dr[7] & mask) == 0)
+                               break;
+               if (i < 4)
+                       watchnum = i;
+               else
+                       return(-1);
+       }
+       
+       switch (access) {
+       case DBREG_DR7_EXEC:
+               size = 1; /* size must be 1 for an execution breakpoint */
+               /* fall through */
+       case DBREG_DR7_WRONLY:
+       case DBREG_DR7_RDWR:
+               break;
+       default:
+               return(-1);
+       }
+
+       /*
+        * we can watch a 1, 2, or 4 byte sized location
+        */
+       switch (size) {
+       case 1:
+               mask = 0x00;
+               break;
+       case 2:
+               mask = 0x01 << 2;
+               break;
+       case 4:
+               mask = 0x03 << 2;
+               break;
+       default:
+               return(-1);
+       }
+
+       mask |= access;
+
+       /* clear the bits we are about to affect */
+       d->dr[7] &= ~((0x3 << (watchnum * 2)) | (0x0f << (watchnum * 4 + 16)));
+
+       /* set drN register to the address, N=watchnum */
+       DBREG_DRX(d, watchnum) = watchaddr;
+
+       /* enable the watchpoint */
+       d->dr[7] |= (0x2 << (watchnum * 2)) | (mask << (watchnum * 4 + 16));
+
+       return(watchnum);
+}
+
+
+int
+kamd64_clr_watch(int watchnum, struct dbreg *d)
+{
+       if (watchnum < 0 || watchnum >= 4)
+               return(-1);
+       
+       d->dr[7] &= ~((0x3 << (watchnum * 2)) | (0x0f << (watchnum * 4 + 16)));
+       DBREG_DRX(d, watchnum) = 0;
+       
+       return(0);
+}
+
+
+int
+db_md_set_watchpoint(db_expr_t addr, db_expr_t size)
+{
+       int avail, wsize;
+       int i;
+