AMD64 - Enable module building, sync i386 headers, etc. as needed.
author      Matthew Dillon <dillon@apollo.backplane.com>
            Wed, 8 Jul 2009 19:33:02 +0000 (12:33 -0700)
committer   Matthew Dillon <dillon@apollo.backplane.com>
            Wed, 8 Jul 2009 19:33:02 +0000 (12:33 -0700)
* Enable module building (platform/pc64/Makefile.inc)

* Copy files and make changes as needed to bring amd64 up to date.

26 files changed:
sys/cpu/amd64/include/atomic.h
sys/cpu/amd64/include/cputypes.h
sys/cpu/amd64/include/specialreg.h
sys/dev/acpica5/Makefile
sys/dev/acpica5/Osd/OsdSchedule.c
sys/dev/acpica5/Osd/OsdSynch.c
sys/dev/acpica5/acpi_timer.c
sys/dev/video/amd64/Makefile [new file with mode: 0644]
sys/platform/pc64/Makefile.inc
sys/platform/pc64/acpica5/Makefile [new file with mode: 0644]
sys/platform/pc64/acpica5/OsdEnvironment.c [new file with mode: 0644]
sys/platform/pc64/acpica5/acpi_machdep.c [new file with mode: 0644]
sys/platform/pc64/acpica5/acpi_pstate_machdep.c [new file with mode: 0644]
sys/platform/pc64/acpica5/acpi_wakecode.S [new file with mode: 0644]
sys/platform/pc64/acpica5/acpi_wakeup.c [new file with mode: 0644]
sys/platform/pc64/acpica5/genwakecode.sh [new file with mode: 0644]
sys/platform/pc64/acpica5/madt.c [new file with mode: 0644]
sys/platform/pc64/amd64/est.c [new file with mode: 0644]
sys/platform/pc64/amd64/identcpu.c
sys/platform/pc64/apm/apm.c [new file with mode: 0644]
sys/platform/pc64/apm/apm.h [new file with mode: 0644]
sys/platform/pc64/include/acpica_machdep.h
sys/platform/pc64/include/apm_bios.h [new file with mode: 0644]
sys/platform/pc64/include/apm_segments.h [new file with mode: 0644]
sys/platform/pc64/include/cpufreq.h [new file with mode: 0644]
sys/platform/pc64/include/md_var.h

index 99a0cbf..25facb4 100644
@@ -417,6 +417,75 @@ atomic_fetchadd_int(volatile u_int *p, u_int v)
 
 #endif /* KLD_MODULE */
 
+#if defined(KLD_MODULE)
+
+#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)                      \
+extern u_##TYPE        atomic_load_acq_##TYPE(volatile u_##TYPE *p);   \
+extern void    atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v);
+
+#else /* !KLD_MODULE */
+
+#if defined(_KERNEL) && !defined(SMP)
+/*
+ * We assume that a = b will do atomic loads and stores.  However, on a
+ * PentiumPro or higher, reads may pass writes, so for that case we have
+ * to use a serializing instruction (i.e. with LOCK) to do the load in
+ * SMP kernels.  For UP kernels, however, the cache of the single processor
+ * is always consistent, so we don't need any memory barriers.
+ */
+#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)              \
+static __inline u_##TYPE                               \
+atomic_load_acq_##TYPE(volatile u_##TYPE *p)           \
+{                                                      \
+       return (*p);                                    \
+}                                                      \
+                                                       \
+static __inline void                                   \
+atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
+{                                                      \
+       *p = v;                                         \
+}                                                      \
+struct __hack
+
+#else /* !(_KERNEL && !SMP) */
+
+#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)              \
+static __inline u_##TYPE                               \
+atomic_load_acq_##TYPE(volatile u_##TYPE *p)           \
+{                                                      \
+       u_##TYPE res;                                   \
+                                                       \
+       __asm __volatile(MPLOCKED LOP                   \
+       : "=a" (res),                   /* 0 */         \
+         "=m" (*p)                     /* 1 */         \
+       : "m" (*p)                      /* 2 */         \
+       : "memory");                                    \
+                                                       \
+       return (res);                                   \
+}                                                      \
+                                                       \
+/*                                                     \
+ * The XCHG instruction asserts LOCK automagically.    \
+ */                                                    \
+static __inline void                                   \
+atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
+{                                                      \
+       __asm __volatile(SOP                            \
+       : "=m" (*p),                    /* 0 */         \
+         "+r" (v)                      /* 1 */         \
+       : "m" (*p));                    /* 2 */         \
+}                                                      \
+struct __hack
+
+#endif /* _KERNEL && !SMP */
+
+#endif /* !KLD_MODULE */
+
+ATOMIC_STORE_LOAD(char, "cmpxchgb %b0,%1", "xchgb %b1,%0");
+ATOMIC_STORE_LOAD(short,"cmpxchgw %w0,%1", "xchgw %w1,%0");
+ATOMIC_STORE_LOAD(int,  "cmpxchgl %0,%1",  "xchgl %1,%0");
+ATOMIC_STORE_LOAD(long, "cmpxchgq %0,%1",  "xchgq %1,%0");
+
 /* Acquire and release variants are identical to the normal ones. */
 #define        atomic_set_acq_char             atomic_set_char
 #define        atomic_set_rel_char             atomic_set_char
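For reference, the ATOMIC_STORE_LOAD(int, ...) instantiation above expands, in the non-module SMP case, to roughly the following; this is only an illustrative expansion of the macro, not additional code in this commit:

static __inline u_int
atomic_load_acq_int(volatile u_int *p)
{
	u_int res;

	/* A LOCK'ed cmpxchg serializes the load on SMP kernels. */
	__asm __volatile(MPLOCKED "cmpxchgl %0,%1"
	: "=a" (res),			/* 0 */
	  "=m" (*p)			/* 1 */
	: "m" (*p)			/* 2 */
	: "memory");

	return (res);
}

static __inline void
atomic_store_rel_int(volatile u_int *p, u_int v)
{
	/* xchg asserts LOCK implicitly, giving release semantics. */
	__asm __volatile("xchgl %1,%0"
	: "=m" (*p),			/* 0 */
	  "+r" (v)			/* 1 */
	: "m" (*p));			/* 2 */
}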
index 2339d30..14153a1 100644
 /*
  * Classes of processor.
  */
-#define        CPUCLASS_X86    0       /* X86 */
-#define        CPUCLASS_K8     1       /* K8 AMD64 class */
+#define CPUCLASS_286   0
+#define CPUCLASS_386   1
+#define CPUCLASS_486   2
+#define CPUCLASS_586   3
+#define CPUCLASS_686   4
 
 /*
  * Kinds of processor.
  */
-#define        CPU_X86         0       /* Intel */
-#define        CPU_CLAWHAMMER  1       /* AMD Clawhammer */
-#define        CPU_SLEDGEHAMMER 2      /* AMD Sledgehammer */
+#define        CPU_286         0       /* Intel 80286 */
+#define        CPU_386SX       1       /* Intel 80386SX */
+#define        CPU_386         2       /* Intel 80386DX */
+#define        CPU_486SX       3       /* Intel 80486SX */
+#define        CPU_486         4       /* Intel 80486DX */
+#define        CPU_586         5       /* Intel P.....m (I hate lawyers; it's TM) */
+#define        CPU_486DLC      6       /* Cyrix 486DLC */
+#define        CPU_686         7       /* Pentium Pro */
+#define        CPU_M1SC        8       /* Cyrix M1sc (aka 5x86) */
+#define        CPU_M1          9       /* Cyrix M1 (aka 6x86) */
+#define        CPU_BLUE        10      /* IBM BlueLighting CPU */
+#define        CPU_M2          11      /* Cyrix M2 (aka enhanced 6x86 with MMX) */
+#define        CPU_NX586       12      /* NexGen (now AMD) 586 */
+#define        CPU_CY486DX     13      /* Cyrix 486S/DX/DX2/DX4 */
+#define        CPU_PII         14      /* Intel Pentium II */
+#define        CPU_PIII        15      /* Intel Pentium III */
+#define        CPU_P4          16      /* Intel Pentium 4 */
 
 #ifndef LOCORE
 extern int     cpu;
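These kind identifiers are consumed by the CPU identification code (identcpu.c, also touched in this commit); a minimal sketch of that pattern, assuming the global cpu declared above has already been filled in during identification:

static void
print_cpu_kind(void)
{
	switch (cpu) {
	case CPU_686:
		kprintf("Pentium Pro class CPU\n");
		break;
	case CPU_P4:
		kprintf("Pentium 4 class CPU\n");
		break;
	default:
		kprintf("x86 CPU, kind %d\n", cpu);
		break;
	}
}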
index 92a9bec..3f46a08 100644
 #define        MSR_BIOS_SIGN           0x08b
 #define        MSR_PERFCTR0            0x0c1
 #define        MSR_PERFCTR1            0x0c2
+#define MSR_IA32_EXT_CONFIG    0x0ee   /* Undocumented. Core Solo/Duo only */
 #define        MSR_MTRRcap             0x0fe
 #define        MSR_BBL_CR_ADDR         0x116
 #define        MSR_BBL_CR_DECC         0x118
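MSRs such as the newly added MSR_IA32_EXT_CONFIG are read with rdmsr() from <machine/cpufunc.h>; a minimal sketch, with the caveat from the comment above that the register is undocumented and should only be touched on Core Solo/Duo parts:

#include <sys/param.h>
#include <sys/systm.h>
#include <machine/cpufunc.h>
#include <machine/specialreg.h>

static void
dump_ext_config(void)
{
	uint64_t val;

	/* Undocumented MSR; caller must have verified the CPU supports it. */
	val = rdmsr(MSR_IA32_EXT_CONFIG);
	kprintf("IA32_EXT_CONFIG = 0x%016jx\n", (uintmax_t)val);
}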
index 33e8a5a..fe9ae8a 100644
@@ -107,8 +107,11 @@ SRCS+=             OsdCache.c
 # Machine-specific code for P-State
 SRCS+= acpi_pstate_machdep.c
 # Machine-specific code such as sleep/wakeup
-SRCS+= acpi_machdep.c acpi_wakecode.h acpi_wakeup.c
+SRCS+= acpi_machdep.c acpi_wakecode.h
 .if ${MACHINE_ARCH} == "i386"
+SRCS+= acpi_wakeup.c
+.endif
+.if ${MACHINE_ARCH} == "i386" || ${MACHINE_ARCH} == "amd64"
 # APIC enumerators
 #SRCS+=        madt.c
 SRCS+= pmtimer.c
index 5ec1dac..02839b1 100644
@@ -197,7 +197,7 @@ AcpiOsStall(UINT32 Microseconds)
     return_VOID;
 }
 
-UINT32
+ACPI_THREAD_ID
 AcpiOsGetThreadId(void)
 {
     struct proc *p;
index 39c9300..91dbb21 100644
@@ -387,7 +387,7 @@ AcpiOsAcquireLock (ACPI_SPINLOCK Spin)
 }
 
 void
-AcpiOsReleaseLock (ACPI_SPINLOCK Spin, UINT32 Flags)
+AcpiOsReleaseLock (ACPI_SPINLOCK Spin, ACPI_CPU_FLAGS Flags)
 {
 #ifdef ACPI_DEBUG_LOCKS
     if (Flags) {
index 2fab164..00a9475 100644
@@ -366,7 +366,13 @@ acpi_timer_test(void)
     max = 0;
 
     /* Test the timer with interrupts disabled to get accurate results. */
+#if defined(__i386__)
     s = read_eflags();
+#elif defined(__amd64__)
+    s = read_rflags();
+#else
+#error "no read_eflags"
+#endif
     cpu_disable_intr();
     last = acpi_timer_read();
     for (n = 0; n < 2000; n++) {
@@ -378,7 +384,13 @@ acpi_timer_test(void)
            min = delta;
        last = this;
     }
+#if defined(__i386__)
     write_eflags(s);
+#elif defined(__amd64__)
+    write_rflags(s);
+#else
+#error "no write_eflags"
+#endif
 
     if (max - min > 2)
        n = 0;
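The two #ifdef blocks above could also be factored into small helpers so the timer test body stays identical on both platforms; a minimal sketch with hypothetical helper names (not part of this commit):

static __inline register_t
acpi_timer_save_flags(void)
{
#if defined(__i386__)
	return (read_eflags());
#elif defined(__amd64__)
	return (read_rflags());
#else
#error "no read_eflags"
#endif
}

static __inline void
acpi_timer_restore_flags(register_t s)
{
#if defined(__i386__)
	write_eflags(s);
#elif defined(__amd64__)
	write_rflags(s);
#else
#error "no write_eflags"
#endif
}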
diff --git a/sys/dev/video/amd64/Makefile b/sys/dev/video/amd64/Makefile
new file mode 100644
index 0000000..7684840
--- /dev/null
@@ -0,0 +1,6 @@
+#
+#
+
+SUBDIR=
+
+.include <bsd.subdir.mk>
index 355d8fd..722a9ec 100644
@@ -1,9 +1,9 @@
 # Used by the device build to check for device support
 #
-# $DragonFly: src/sys/platform/pc64/Makefile.inc,v 1.2 2007/09/23 04:29:31 yanyh Exp $
 
-DEV_SUPPORT=
+DEV_SUPPORT=   acpica5 agp bridge crypto disk drm est misc netif \
+               pccard powermng raid serial sound usbmisc video
 
-# SYSCONS_APM_SUPPORT=1
-BOOT0CFG_SUPPORT=1
+#SYSCONS_APM_SUPPORT=1
+BOOT0CFG_SUPPORT=1
 
diff --git a/sys/platform/pc64/acpica5/Makefile b/sys/platform/pc64/acpica5/Makefile
new file mode 100644
index 0000000..9ecdafc
--- /dev/null
@@ -0,0 +1,26 @@
+# $FreeBSD: src/sys/i386/acpica/Makefile,v 1.6 2004/04/13 13:43:11 des Exp $
+# $DragonFly: src/sys/platform/pc32/acpica5/Makefile,v 1.2 2004/06/27 08:52:45 dillon Exp $
+#
+
+# Correct path for kernel builds
+# Don't rely on the kernel's .depend file
+.ifdef MAKESRCPATH
+.PATH: ${MAKESRCPATH}
+DEPENDFILE=
+.else
+MAKESRCPATH= ${.CURDIR}
+CLEANFILES= acpi_wakecode.h acpi_wakecode.bin acpi_wakecode.o
+.endif
+CFLAGS+=       -I. -I@
+
+all: acpi_wakecode.h
+
+acpi_wakecode.o: acpi_wakecode.S
+
+acpi_wakecode.bin: acpi_wakecode.o
+       objcopy -S -O binary acpi_wakecode.o acpi_wakecode.bin
+
+acpi_wakecode.h: acpi_wakecode.bin acpi_wakecode.o
+       sh ${MAKESRCPATH}/genwakecode.sh > acpi_wakecode.h
+
+.include <bsd.prog.mk>
diff --git a/sys/platform/pc64/acpica5/OsdEnvironment.c b/sys/platform/pc64/acpica5/OsdEnvironment.c
new file mode 100644
index 0000000..0e68087
--- /dev/null
@@ -0,0 +1,75 @@
+/*-
+ * Copyright (c) 2000,2001 Michael Smith
+ * Copyright (c) 2000 BSDi
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: src/sys/i386/acpica/OsdEnvironment.c,v 1.10 2004/05/06 02:18:58 njl Exp $
+ * $DragonFly: src/sys/platform/pc32/acpica5/OsdEnvironment.c,v 1.3 2007/01/17 17:31:19 y0netan1 Exp $
+ */
+
+/*
+ * 6.1 : Environmental support
+ */
+#include <sys/types.h>
+#include <sys/linker_set.h>
+#include <sys/sysctl.h>
+
+#include "acpi.h"
+
+static u_long i386_acpi_root;
+
+SYSCTL_ULONG(_machdep, OID_AUTO, acpi_root, CTLFLAG_RD, &i386_acpi_root, 0,
+            "The physical address of the RSDP");
+
+ACPI_STATUS
+AcpiOsInitialize(void)
+{
+       return(0);
+}
+
+ACPI_STATUS
+AcpiOsTerminate(void)
+{
+       return(0);
+}
+
+ACPI_PHYSICAL_ADDRESS
+AcpiOsGetRootPointer(void)
+{
+       ACPI_NATIVE_UINT ptr;
+       ACPI_STATUS status;
+
+       if (i386_acpi_root == 0) {
+               /*
+                * The loader passes the physical address at which it found the
+                * RSDP in a hint.  We could recover this rather than searching
+                * manually here.
+                */
+               status = AcpiFindRootPointer(&ptr);
+               if (status == AE_OK)
+                       i386_acpi_root = ptr;
+       }
+
+       return (i386_acpi_root);
+}
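The comment in AcpiOsGetRootPointer() notes that the loader already passes the RSDP address in a hint; a hedged sketch of recovering it inside this file, assuming a hint name such as hint.acpi.0.rsdp (the name and its availability are assumptions, not something this commit establishes):

static u_long
acpi_get_rsdp_hint(void)
{
	char *val;
	u_long addr = 0;

	/* Hypothetical hint name; the loader must actually export it. */
	val = kgetenv("hint.acpi.0.rsdp");
	if (val != NULL) {
		addr = strtoul(val, NULL, 0);
		kfreeenv(val);
	}
	return (addr);
}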
diff --git a/sys/platform/pc64/acpica5/acpi_machdep.c b/sys/platform/pc64/acpica5/acpi_machdep.c
new file mode 100644
index 0000000..723c568
--- /dev/null
@@ -0,0 +1,348 @@
+/*-
+ * Copyright (c) 2001 Mitsuru IWASAKI
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: src/sys/i386/acpica/acpi_machdep.c,v 1.20 2004/05/05 19:51:15 njl Exp $
+ * $DragonFly: src/sys/platform/pc32/acpica5/acpi_machdep.c,v 1.14 2008/09/29 06:59:45 hasso Exp $
+ */
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/device.h>
+#include <sys/fcntl.h>
+#include <sys/kernel.h>
+#include <sys/sysctl.h>
+#include <sys/uio.h>
+
+#include "acpi.h"
+#include <dev/acpica5/acpivar.h>
+#include <dev/acpica5/acpiio.h>
+
+static device_t        acpi_dev;
+
+/*
+ * APM driver emulation
+ */
+
+#include <sys/selinfo.h>
+
+#include <machine/apm_bios.h>
+#include <machine/pc/bios.h>
+#include <machine_base/apm/apm.h>
+
+uint32_t acpi_reset_video = 1;
+TUNABLE_INT("hw.acpi.reset_video", &acpi_reset_video);
+
+static int intr_model = ACPI_INTR_PIC;
+static struct apm_softc        apm_softc;
+
+static d_open_t apmopen;
+static d_close_t apmclose;
+static d_write_t apmwrite;
+static d_ioctl_t apmioctl;
+static d_poll_t apmpoll;
+
+#define CDEV_MAJOR 39
+static struct dev_ops apm_ops = {
+       { "apm", CDEV_MAJOR, 0 },
+        .d_open = apmopen,
+        .d_close = apmclose,
+       .d_write = apmwrite,
+        .d_ioctl = apmioctl,
+       .d_poll = apmpoll
+};
+
+static int
+acpi_capm_convert_battstate(struct  acpi_battinfo *battp)
+{
+       int     state;
+
+       state = 0xff;   /* XXX unknown */
+
+       if (battp->state & ACPI_BATT_STAT_DISCHARG) {
+               if (battp->cap >= 50)
+                       state = 0;      /* high */
+               else
+                       state = 1;      /* low */
+       }
+       if (battp->state & ACPI_BATT_STAT_CRITICAL)
+               state = 2;              /* critical */
+       if (battp->state & ACPI_BATT_STAT_CHARGING)
+               state = 3;              /* charging */
+
+       /* If still unknown, determine it based on the battery capacity. */
+       if (state == 0xff) {
+               if (battp->cap >= 50)
+                       state = 0;      /* high */
+               else
+                       state = 1;      /* low */
+       }
+
+       return (state);
+}
+
+static int
+acpi_capm_convert_battflags(struct  acpi_battinfo *battp)
+{
+       int     flags;
+
+       flags = 0;
+
+       if (battp->cap >= 50)
+               flags |= APM_BATT_HIGH;
+       else {
+               if (battp->state & ACPI_BATT_STAT_CRITICAL)
+                       flags |= APM_BATT_CRITICAL;
+               else
+                       flags |= APM_BATT_LOW;
+       }
+       if (battp->state & ACPI_BATT_STAT_CHARGING)
+               flags |= APM_BATT_CHARGING;
+       if (battp->state == ACPI_BATT_STAT_NOT_PRESENT)
+               flags = APM_BATT_NOT_PRESENT;
+
+       return (flags);
+}
+
+static int
+acpi_capm_get_info(apm_info_t aip)
+{
+       int     acline;
+       struct  acpi_battinfo batt;
+
+       aip->ai_infoversion = 1;
+       aip->ai_major       = 1;
+       aip->ai_minor       = 2;
+       aip->ai_status      = apm_softc.active;
+       aip->ai_capabilities= 0xff00;   /* XXX unknown */
+
+       if (acpi_acad_get_acline(&acline))
+               aip->ai_acline = 0xff;          /* unknown */
+       else
+               aip->ai_acline = acline;        /* on/off */
+
+       if (acpi_battery_get_battinfo(NULL, &batt)) {
+               aip->ai_batt_stat = 0xff;       /* unknown */
+               aip->ai_batt_life = 0xff;       /* unknown */
+               aip->ai_batt_time = -1;         /* unknown */
+               aip->ai_batteries = 0;
+       } else {
+               aip->ai_batt_stat = acpi_capm_convert_battstate(&batt);
+               aip->ai_batt_life = batt.cap;
+               aip->ai_batt_time = (batt.min == -1) ? -1 : batt.min * 60;
+               aip->ai_batteries = acpi_battery_get_units();
+       }
+
+       return (0);
+}
+
+static int
+acpi_capm_get_pwstatus(apm_pwstatus_t app)
+{
+       device_t dev;
+       int     acline, unit, error;
+       struct  acpi_battinfo batt;
+
+       if (app->ap_device != PMDV_ALLDEV &&
+           (app->ap_device < PMDV_BATT0 || app->ap_device > PMDV_BATT_ALL))
+               return (1);
+
+       if (app->ap_device == PMDV_ALLDEV)
+               error = acpi_battery_get_battinfo(NULL, &batt);
+       else {
+               unit = app->ap_device - PMDV_BATT0;
+               dev = devclass_get_device(devclass_find("battery"), unit);
+               if (dev != NULL)
+                       error = acpi_battery_get_battinfo(dev, &batt);
+               else
+                       error = ENXIO;
+       }
+       if (error)
+               return (1);
+
+       app->ap_batt_stat = acpi_capm_convert_battstate(&batt);
+       app->ap_batt_flag = acpi_capm_convert_battflags(&batt);
+       app->ap_batt_life = batt.cap;
+       app->ap_batt_time = (batt.min == -1) ? -1 : batt.min * 60;
+
+       if (acpi_acad_get_acline(&acline))
+               app->ap_acline = 0xff;          /* unknown */
+       else
+               app->ap_acline = acline;        /* on/off */
+
+       return (0);
+}
+
+static int
+apmopen(struct dev_open_args *ap)
+{
+       return (0);
+}
+
+static int
+apmclose(struct dev_close_args *ap)
+{
+       return (0);
+}
+
+static int
+apmioctl(struct dev_ioctl_args *ap)
+{
+       int     error = 0;
+       struct  acpi_softc *acpi_sc;
+       struct apm_info info;
+       apm_info_old_t aiop;
+
+       acpi_sc = device_get_softc(acpi_dev);
+
+       switch (ap->a_cmd) {
+       case APMIO_SUSPEND:
+               if ((ap->a_fflag & FWRITE) == 0)
+                       return (EPERM);
+               if (apm_softc.active)
+                       acpi_SetSleepState(acpi_sc, acpi_sc->acpi_suspend_sx);
+               else
+                       error = EINVAL;
+               break;
+       case APMIO_STANDBY:
+               if ((ap->a_fflag & FWRITE) == 0)
+                       return (EPERM);
+               if (apm_softc.active)
+                       acpi_SetSleepState(acpi_sc, acpi_sc->acpi_standby_sx);
+               else
+                       error = EINVAL;
+               break;
+       case APMIO_GETINFO_OLD:
+               if (acpi_capm_get_info(&info))
+                       error = ENXIO;
+               aiop = (apm_info_old_t)ap->a_data;
+               aiop->ai_major = info.ai_major;
+               aiop->ai_minor = info.ai_minor;
+               aiop->ai_acline = info.ai_acline;
+               aiop->ai_batt_stat = info.ai_batt_stat;
+               aiop->ai_batt_life = info.ai_batt_life;
+               aiop->ai_status = info.ai_status;
+               break;
+       case APMIO_GETINFO:
+               if (acpi_capm_get_info((apm_info_t)ap->a_data))
+                       error = ENXIO;
+               break;
+       case APMIO_GETPWSTATUS:
+               if (acpi_capm_get_pwstatus((apm_pwstatus_t)ap->a_data))
+                       error = ENXIO;
+               break;
+       case APMIO_ENABLE:
+               if ((ap->a_fflag & FWRITE) == 0)
+                       return (EPERM);
+               apm_softc.active = 1;
+               break;
+       case APMIO_DISABLE:
+               if ((ap->a_fflag & FWRITE) == 0)
+                       return (EPERM);
+               apm_softc.active = 0;
+               break;
+       case APMIO_HALTCPU:
+               break;
+       case APMIO_NOTHALTCPU:
+               break;
+       case APMIO_DISPLAY:
+               if ((ap->a_fflag & FWRITE) == 0)
+                       return (EPERM);
+               break;
+       case APMIO_BIOS:
+               if ((ap->a_fflag & FWRITE) == 0)
+                       return (EPERM);
+               bzero(ap->a_data, sizeof(struct apm_bios_arg));
+               break;
+       default:
+               error = EINVAL;
+               break;
+       }
+
+       return (error);
+}
+
+static int
+apmwrite(struct dev_write_args *ap)
+{
+       return (ap->a_uio->uio_resid);
+}
+
+static int
+apmpoll(struct dev_poll_args *ap)
+{
+       ap->a_events = 0;
+       return (0);
+}
+
+static void
+acpi_capm_init(struct acpi_softc *sc)
+{
+       dev_ops_add(&apm_ops, 0, 0);
+        make_dev(&apm_ops, 0, 0, 5, 0664, "apm");
+        make_dev(&apm_ops, 8, 0, 5, 0664, "apm");
+       kprintf("Warning: ACPI is disabling APM's device.  You can't run both\n");
+}
+
+int
+acpi_machdep_init(device_t dev)
+{
+       struct  acpi_softc *sc;
+
+       acpi_dev = dev;
+       sc = device_get_softc(acpi_dev);
+
+       /*
+        * XXX: Prevent the PnP BIOS code from interfering with
+        * our own scan of ISA devices.
+        */
+#if 0
+       PnPBIOStable = NULL;
+#endif
+
+       acpi_capm_init(sc);
+
+       acpi_install_wakeup_handler(sc);
+
+       if (intr_model == ACPI_INTR_PIC)
+               BUS_CONFIG_INTR(dev, AcpiGbl_FADT.SciInterrupt,
+                   INTR_TRIGGER_LEVEL, INTR_POLARITY_LOW);
+       else
+               acpi_SetIntrModel(intr_model);
+
+       SYSCTL_ADD_UINT(&sc->acpi_sysctl_ctx,
+           SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO,
+           "reset_video", CTLFLAG_RD | CTLFLAG_RW, &acpi_reset_video, 0,
+           "Call the VESA reset BIOS vector on the resume path");
+
+       return (0);
+}
+
+void
+acpi_SetDefaultIntrModel(int model)
+{
+
+       intr_model = model;
+}
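Since the ACPI/APM emulation above registers the usual /dev/apm device, existing userland consumers keep working; a minimal userland sketch of querying it via APMIO_GETINFO (illustrative only, not part of the commit):

#include <sys/ioctl.h>
#include <machine/apm_bios.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct apm_info info;
	int fd;

	fd = open("/dev/apm", O_RDONLY);
	if (fd < 0)
		return (1);
	/* The emulation fills in AC line state and battery data via ACPI. */
	if (ioctl(fd, APMIO_GETINFO, &info) == 0)
		printf("ac line %u, battery life %u%%\n",
		    info.ai_acline, info.ai_batt_life);
	close(fd);
	return (0);
}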
diff --git a/sys/platform/pc64/acpica5/acpi_pstate_machdep.c b/sys/platform/pc64/acpica5/acpi_pstate_machdep.c
new file mode 100644
index 0000000..e0748fa
--- /dev/null
@@ -0,0 +1,625 @@
+/*
+ * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
+ *
+ * This code is derived from software contributed to The DragonFly Project
+ * by Sepherosa Ziehau <sepherosa@gmail.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ * 3. Neither the name of The DragonFly Project nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific, prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/globaldata.h>
+
+#include <machine/md_var.h>
+#include <machine/cpufunc.h>
+#include <machine/cpufreq.h>
+#include <machine/specialreg.h>
+
+#include "acpi.h"
+#include "acpi_cpu_pstate.h"
+
+#define AMD_APMI_HWPSTATE              0x80
+
+#define AMD_MSR_PSTATE_CSR_MASK                0x7ULL
+#define AMD1X_MSR_PSTATE_CTL           0xc0010062
+#define AMD1X_MSR_PSTATE_ST            0xc0010063
+
+#define AMD_MSR_PSTATE_EN              0x8000000000000000ULL
+
+#define AMD10_MSR_PSTATE_START         0xc0010064
+#define AMD10_MSR_PSTATE_COUNT         5
+
+#define AMD0F_PST_CTL_FID(cval)                (((cval) >> 0)  & 0x3f)
+#define AMD0F_PST_CTL_VID(cval)                (((cval) >> 6)  & 0x1f)
+#define AMD0F_PST_CTL_VST(cval)                (((cval) >> 11) & 0x7f)
+#define AMD0F_PST_CTL_MVS(cval)                (((cval) >> 18) & 0x3)
+#define AMD0F_PST_CTL_PLLTIME(cval)    (((cval) >> 20) & 0x7f)
+#define AMD0F_PST_CTL_RVO(cval)                (((cval) >> 28) & 0x3)
+#define AMD0F_PST_CTL_IRT(cval)                (((cval) >> 30) & 0x3)
+
+#define AMD0F_PST_ST_FID(sval)         (((sval) >> 0) & 0x3f)
+#define AMD0F_PST_ST_VID(sval)         (((sval) >> 6) & 0x3f)
+
+#define INTEL_MSR_MISC_ENABLE          0x1a0
+#define INTEL_MSR_MISC_EST_EN          0x10000ULL
+
+#define INTEL_MSR_PERF_STATUS          0x198
+#define INTEL_MSR_PERF_CTL             0x199
+#define INTEL_MSR_PERF_MASK            0xffffULL
+
+static const struct acpi_pst_md *
+               acpi_pst_amd_probe(void);
+static int     acpi_pst_amd_check_csr(const struct acpi_pst_res *,
+                   const struct acpi_pst_res *);
+static int     acpi_pst_amd1x_check_pstates(const struct acpi_pstate *, int,
+                   uint32_t, uint32_t);
+static int     acpi_pst_amd10_check_pstates(const struct acpi_pstate *, int);
+static int     acpi_pst_amd0f_check_pstates(const struct acpi_pstate *, int);
+static int     acpi_pst_amd_init(const struct acpi_pst_res *,
+                   const struct acpi_pst_res *);
+static int     acpi_pst_amd1x_set_pstate(const struct acpi_pst_res *,
+                   const struct acpi_pst_res *, const struct acpi_pstate *);
+static int     acpi_pst_amd0f_set_pstate(const struct acpi_pst_res *,
+                   const struct acpi_pst_res *, const struct acpi_pstate *);
+static const struct acpi_pstate *
+               acpi_pst_amd1x_get_pstate(const struct acpi_pst_res *,
+                   const struct acpi_pstate *, int);
+static const struct acpi_pstate *
+               acpi_pst_amd0f_get_pstate(const struct acpi_pst_res *,
+                   const struct acpi_pstate *, int);
+
+static const struct acpi_pst_md *
+               acpi_pst_intel_probe(void);
+static int     acpi_pst_intel_check_csr(const struct acpi_pst_res *,
+                   const struct acpi_pst_res *);
+static int     acpi_pst_intel_check_pstates(const struct acpi_pstate *, int);
+static int     acpi_pst_intel_init(const struct acpi_pst_res *,
+                   const struct acpi_pst_res *);
+static int     acpi_pst_intel_set_pstate(const struct acpi_pst_res *,
+                   const struct acpi_pst_res *, const struct acpi_pstate *);
+static const struct acpi_pstate *
+               acpi_pst_intel_get_pstate(const struct acpi_pst_res *,
+                   const struct acpi_pstate *, int);
+
+static int     acpi_pst_md_gas_asz(const ACPI_GENERIC_ADDRESS *);
+static int     acpi_pst_md_gas_verify(const ACPI_GENERIC_ADDRESS *);
+static uint32_t        acpi_pst_md_res_read(const struct acpi_pst_res *);
+static void    acpi_pst_md_res_write(const struct acpi_pst_res *, uint32_t);
+
+static const struct acpi_pst_md        acpi_pst_amd10 = {
+       .pmd_check_csr          = acpi_pst_amd_check_csr,
+       .pmd_check_pstates      = acpi_pst_amd10_check_pstates,
+       .pmd_init               = acpi_pst_amd_init,
+       .pmd_set_pstate         = acpi_pst_amd1x_set_pstate,
+       .pmd_get_pstate         = acpi_pst_amd1x_get_pstate
+};
+
+static const struct acpi_pst_md        acpi_pst_amd0f = {
+       .pmd_check_csr          = acpi_pst_amd_check_csr,
+       .pmd_check_pstates      = acpi_pst_amd0f_check_pstates,
+       .pmd_init               = acpi_pst_amd_init,
+       .pmd_set_pstate         = acpi_pst_amd0f_set_pstate,
+       .pmd_get_pstate         = acpi_pst_amd0f_get_pstate
+};
+
+static const struct acpi_pst_md acpi_pst_intel = {
+       .pmd_check_csr          = acpi_pst_intel_check_csr,
+       .pmd_check_pstates      = acpi_pst_intel_check_pstates,
+       .pmd_init               = acpi_pst_intel_init,
+       .pmd_set_pstate         = acpi_pst_intel_set_pstate,
+       .pmd_get_pstate         = acpi_pst_intel_get_pstate
+};
+
+const struct acpi_pst_md *
+acpi_pst_md_probe(void)
+{
+       if (strcmp(cpu_vendor, "AuthenticAMD") == 0)
+               return acpi_pst_amd_probe();
+       else if (strcmp(cpu_vendor, "GenuineIntel") == 0)
+               return acpi_pst_intel_probe();
+       return NULL;
+}
+
+static const struct acpi_pst_md *
+acpi_pst_amd_probe(void)
+{
+       uint32_t regs[4], ext_family;
+
+       if ((cpu_id & 0x00000f00) != 0x00000f00)
+               return NULL;
+
+       /* Check whether APMI exists */
+       do_cpuid(0x80000000, regs);
+       if (regs[0] < 0x80000007)
+               return NULL;
+
+       /* Fetch APMI */
+       do_cpuid(0x80000007, regs);
+
+       ext_family = cpu_id & 0x0ff00000;
+       switch (ext_family) {
+       case 0x00000000:        /* Family 0fh */
+               if ((regs[3] & 0x06) == 0x06)
+                       return &acpi_pst_amd0f;
+               break;
+
+       case 0x00100000:        /* Family 10h */
+               if (regs[3] & 0x80)
+                       return &acpi_pst_amd10;
+               break;
+
+       default:
+               break;
+       }
+       return NULL;
+}
+
+static int
+acpi_pst_amd_check_csr(const struct acpi_pst_res *ctrl,
+                      const struct acpi_pst_res *status)
+{
+       if (ctrl->pr_gas.SpaceId != ACPI_ADR_SPACE_FIXED_HARDWARE) {
+               kprintf("cpu%d: Invalid P-State control register\n", mycpuid);
+               return EINVAL;
+       }
+       if (status->pr_gas.SpaceId != ACPI_ADR_SPACE_FIXED_HARDWARE) {
+               kprintf("cpu%d: Invalid P-State status register\n", mycpuid);
+               return EINVAL;
+       }
+       return 0;
+}
+
+static int
+acpi_pst_amd1x_check_pstates(const struct acpi_pstate *pstates, int npstates,
+                            uint32_t msr_start, uint32_t msr_end)
+{
+       int i;
+
+       /*
+        * Make sure that related MSR P-State registers are enabled.
+        *
+        * NOTE:
+        * We don't check status register value here;
+        * it will not be used.
+        */
+       for (i = 0; i < npstates; ++i) {
+               uint64_t pstate;
+               uint32_t msr;
+
+               msr = msr_start +
+                     (pstates[i].st_cval & AMD_MSR_PSTATE_CSR_MASK);
+               if (msr >= msr_end) {
+                       kprintf("cpu%d: MSR P-State register %#08x "
+                               "does not exist\n", mycpuid, msr);
+                       return EINVAL;
+               }
+
+               pstate = rdmsr(msr);
+               if ((pstate & AMD_MSR_PSTATE_EN) == 0) {
+                       kprintf("cpu%d: MSR P-State register %#08x "
+                               "is not enabled\n", mycpuid, msr);
+                       return EINVAL;
+               }
+       }
+       return 0;
+}
+
+static int
+acpi_pst_amd10_check_pstates(const struct acpi_pstate *pstates, int npstates)
+{
+       /* Only P0-P4 are supported */
+       if (npstates > AMD10_MSR_PSTATE_COUNT) {
+               kprintf("cpu%d: only P0-P4 is allowed\n", mycpuid);
+               return EINVAL;
+       }
+
+       return acpi_pst_amd1x_check_pstates(pstates, npstates,
+                       AMD10_MSR_PSTATE_START,
+                       AMD10_MSR_PSTATE_START + AMD10_MSR_PSTATE_COUNT);
+}
+
+static int
+acpi_pst_amd1x_set_pstate(const struct acpi_pst_res *ctrl __unused,
+                         const struct acpi_pst_res *status __unused,
+                         const struct acpi_pstate *pstate)
+{
+       uint64_t cval;
+
+       cval = pstate->st_cval & AMD_MSR_PSTATE_CSR_MASK;
+       wrmsr(AMD1X_MSR_PSTATE_CTL, cval);
+
+       /*
+        * Don't check AMD1X_MSR_PSTATE_ST here, since it is
+        * affected by various P-State limits.
+        *
+        * For details:
+        * AMD Family 10h Processor BKDG Rev 3.20 (#31116)
+        * 2.4.2.4 P-state Transition Behavior
+        */
+
+       return 0;
+}
+
+static const struct acpi_pstate *
+acpi_pst_amd1x_get_pstate(const struct acpi_pst_res *status __unused,
+                         const struct acpi_pstate *pstates, int npstates)
+{
+       uint64_t sval;
+       int i;
+
+       sval = rdmsr(AMD1X_MSR_PSTATE_ST) & AMD_MSR_PSTATE_CSR_MASK;
+       for (i = 0; i < npstates; ++i) {
+               if ((pstates[i].st_sval & AMD_MSR_PSTATE_CSR_MASK) == sval)
+                       return &pstates[i];
+       }
+       return NULL;
+}
+
+static int
+acpi_pst_amd0f_check_pstates(const struct acpi_pstate *pstates, int npstates)
+{
+       struct amd0f_fidvid fv_max, fv_min;
+       int i;
+
+       amd0f_fidvid_limit(&fv_min, &fv_max);
+
+       for (i = 0; i < npstates; ++i) {
+               const struct acpi_pstate *p = &pstates[i];
+               uint32_t fid, vid, mvs, rvo;
+               int mvs_mv, rvo_mv;
+
+               fid = AMD0F_PST_CTL_FID(p->st_cval);
+               vid = AMD0F_PST_CTL_VID(p->st_cval);
+
+               if (fid > fv_max.fid || fid < fv_min.fid) {
+                       kprintf("cpu%d: Invalid FID %#x [%#x, %#x]\n",
+                               mycpuid, fid, fv_min.fid, fv_max.fid);
+                       return EINVAL;
+               }
+               if (vid < fv_max.vid || vid > fv_min.vid) {
+                       kprintf("cpu%d: Invalid VID %#x [%#x, %#x]\n",
+                               mycpuid, vid, fv_max.vid, fv_min.vid);
+                       return EINVAL;
+               }
+
+               mvs = AMD0F_PST_CTL_MVS(p->st_cval);
+               rvo = AMD0F_PST_CTL_RVO(p->st_cval);
+
+               /* Only 0 is allowed, i.e. 25mV stepping */
+               if (mvs != 0) {
+                       kprintf("cpu%d: Invalid MVS %#x\n", mycpuid, mvs);
+                       return EINVAL;
+               }
+
+               /* -> mV */
+               mvs_mv = 25 * (1 << mvs);
+               rvo_mv = 25 * rvo;
+               if (rvo_mv % mvs_mv != 0) {
+                       kprintf("cpu%d: Invalid MVS/RVO (%#x/%#x)\n",
+                               mycpuid, mvs, rvo);
+                       return EINVAL;
+               }
+       }
+       return 0;
+}
+
+static int
+acpi_pst_amd0f_set_pstate(const struct acpi_pst_res *ctrl __unused,
+                         const struct acpi_pst_res *status __unused,
+                         const struct acpi_pstate *pstate)
+{
+       struct amd0f_fidvid fv;
+       struct amd0f_xsit xsit;
+
+       fv.fid = AMD0F_PST_CTL_FID(pstate->st_cval);
+       fv.vid = AMD0F_PST_CTL_VID(pstate->st_cval);
+
+       xsit.rvo = AMD0F_PST_CTL_RVO(pstate->st_cval);
+       xsit.mvs = AMD0F_PST_CTL_MVS(pstate->st_cval);
+       xsit.vst = AMD0F_PST_CTL_VST(pstate->st_cval);
+       xsit.pll_time = AMD0F_PST_CTL_PLLTIME(pstate->st_cval);
+       xsit.irt = AMD0F_PST_CTL_IRT(pstate->st_cval);
+
+       return amd0f_set_fidvid(&fv, &xsit);
+}
+
+static const struct acpi_pstate *
+acpi_pst_amd0f_get_pstate(const struct acpi_pst_res *status __unused,
+                         const struct acpi_pstate *pstates, int npstates)
+{
+       struct amd0f_fidvid fv;
+       int error, i;
+
+       error = amd0f_get_fidvid(&fv);
+       if (error)
+               return NULL;
+
+       for (i = 0; i < npstates; ++i) {
+               const struct acpi_pstate *p = &pstates[i];
+
+               if (fv.fid == AMD0F_PST_ST_FID(p->st_sval) &&
+                   fv.vid == AMD0F_PST_ST_VID(p->st_sval))
+                       return p;
+       }
+       return NULL;
+}
+
+static int
+acpi_pst_amd_init(const struct acpi_pst_res *ctrl __unused,
+                 const struct acpi_pst_res *status __unused)
+{
+       return 0;
+}
+
+static const struct acpi_pst_md *
+acpi_pst_intel_probe(void)
+{
+       uint32_t family;
+
+       if ((cpu_feature2 & CPUID2_EST) == 0)
+               return NULL;
+
+       family = cpu_id & 0xf00;
+       if (family != 0xf00 && family != 0x600)
+               return NULL;
+       return &acpi_pst_intel;
+}
+
+static int
+acpi_pst_intel_check_csr(const struct acpi_pst_res *ctrl,
+                        const struct acpi_pst_res *status)
+{
+       int error;
+
+       if (ctrl->pr_gas.SpaceId != status->pr_gas.SpaceId) {
+               kprintf("cpu%d: P-State control(%d)/status(%d) registers have "
+                       "different SpaceId", mycpuid,
+                       ctrl->pr_gas.SpaceId, status->pr_gas.SpaceId);
+               return EINVAL;
+       }
+
+       switch (ctrl->pr_gas.SpaceId) {
+       case ACPI_ADR_SPACE_FIXED_HARDWARE:
+               if (ctrl->pr_res != NULL || status->pr_res != NULL) {
+                       /* XXX should panic() */
+                       kprintf("cpu%d: Allocated resource for fixed hardware "
+                               "registers\n", mycpuid);
+                       return EINVAL;
+               }
+               break;
+
+       case ACPI_ADR_SPACE_SYSTEM_IO:
+               if (ctrl->pr_res == NULL) {
+                       kprintf("cpu%d: ioport allocation failed for control "
+                               "register\n", mycpuid);
+                       return ENXIO;
+               }
+               error = acpi_pst_md_gas_verify(&ctrl->pr_gas);
+               if (error) {
+                       kprintf("cpu%d: Invalid control register GAS\n",
+                               mycpuid);
+                       return error;
+               }
+
+               if (status->pr_res == NULL) {
+                       kprintf("cpu%d: ioport allocation failed for status "
+                               "register\n", mycpuid);
+                       return ENXIO;
+               }
+               error = acpi_pst_md_gas_verify(&status->pr_gas);
+               if (error) {
+                       kprintf("cpu%d: Invalid status register GAS\n",
+                               mycpuid);
+                       return error;
+               }
+               break;
+
+       default:
+               kprintf("cpu%d: Invalid P-State control/status register "
+                       "SpaceId %d\n", mycpuid, ctrl->pr_gas.SpaceId);
+               return EOPNOTSUPP;
+       }
+       return 0;
+}
+
+static int
+acpi_pst_intel_check_pstates(const struct acpi_pstate *pstates __unused,
+                            int npstates __unused)
+{
+       return 0;
+}
+
+static int
+acpi_pst_intel_init(const struct acpi_pst_res *ctrl __unused,
+                   const struct acpi_pst_res *status __unused)
+{
+       uint32_t family, model;
+       uint64_t misc_enable;
+
+       family = cpu_id & 0xf00;
+       if (family == 0xf00) {
+               /* EST enable bit is reserved in INTEL_MSR_MISC_ENABLE */
+               return 0;
+       }
+       KKASSERT(family == 0x600);
+
+       model = ((cpu_id & 0xf0000) >> 12) | ((cpu_id & 0xf0) >> 4);
+       if (model < 0xd) {
+               /* EST enable bit is reserved in INTEL_MSR_MISC_ENABLE */
+               return 0;
+       }
+
+       misc_enable = rdmsr(INTEL_MSR_MISC_ENABLE);
+       if ((misc_enable & INTEL_MSR_MISC_EST_EN) == 0) {
+               misc_enable |= INTEL_MSR_MISC_EST_EN;
+               wrmsr(INTEL_MSR_MISC_ENABLE, misc_enable);
+
+               misc_enable = rdmsr(INTEL_MSR_MISC_ENABLE);
+               if ((misc_enable & INTEL_MSR_MISC_EST_EN) == 0) {
+                       kprintf("cpu%d: Can't enable EST\n", mycpuid);
+                       return EIO;
+               }
+       }
+       return 0;
+}
+
+static int
+acpi_pst_intel_set_pstate(const struct acpi_pst_res *ctrl,
+                         const struct acpi_pst_res *status __unused,
+                         const struct acpi_pstate *pstate)
+{
+       if (ctrl->pr_res != NULL) {
+               acpi_pst_md_res_write(ctrl, pstate->st_cval);
+       } else {
+               uint64_t ctl;
+
+               ctl = rdmsr(INTEL_MSR_PERF_CTL);
+               ctl &= ~INTEL_MSR_PERF_MASK;
+               ctl |= (pstate->st_cval & INTEL_MSR_PERF_MASK);
+               wrmsr(INTEL_MSR_PERF_CTL, ctl);
+       }
+       return 0;
+}
+
+static const struct acpi_pstate *
+acpi_pst_intel_get_pstate(const struct acpi_pst_res *status,
+                         const struct acpi_pstate *pstates, int npstates)
+{
+       int i;
+
+       if (status->pr_res != NULL) {
+               uint32_t st;
+
+               st = acpi_pst_md_res_read(status);
+               for (i = 0; i < npstates; ++i) {
+                       if (pstates[i].st_sval == st)
+                               return &pstates[i];
+               }
+       } else {
+               uint64_t sval;
+
+               sval = rdmsr(INTEL_MSR_PERF_STATUS) & INTEL_MSR_PERF_MASK;
+               for (i = 0; i < npstates; ++i) {
+                       if ((pstates[i].st_sval & INTEL_MSR_PERF_MASK) == sval)
+                               return &pstates[i];
+               }
+       }
+       return NULL;
+}
+
+static int
+acpi_pst_md_gas_asz(const ACPI_GENERIC_ADDRESS *gas)
+{
+       int asz;
+
+       if (gas->AccessWidth != 0)
+               asz = gas->AccessWidth;
+       else
+               asz = gas->BitWidth / NBBY;
+       switch (asz) {
+       case 1:
+       case 2:
+       case 4:
+               break;
+       default:
+               asz = 0;
+               break;
+       }
+       return asz;
+}
+
+static int
+acpi_pst_md_gas_verify(const ACPI_GENERIC_ADDRESS *gas)
+{
+       int reg, end, asz;
+
+       if (gas->BitOffset % NBBY != 0)
+               return EINVAL;
+
+       end = gas->BitWidth / NBBY;
+       reg = gas->BitOffset / NBBY;
+
+       if (reg >= end)
+               return EINVAL;
+
+       asz = acpi_pst_md_gas_asz(gas);
+       if (asz == 0)
+               return EINVAL;
+
+       if (reg + asz > end)
+               return EINVAL;
+       return 0;
+}
+
+static uint32_t
+acpi_pst_md_res_read(const struct acpi_pst_res *res)
+{
+       int asz, reg;
+
+       KKASSERT(res->pr_res != NULL);
+       asz = acpi_pst_md_gas_asz(&res->pr_gas);
+       reg = res->pr_gas.BitOffset / NBBY;
+
+       switch (asz) {
+       case 1:
+               return bus_space_read_1(res->pr_bt, res->pr_bh, reg);
+       case 2:
+               return bus_space_read_2(res->pr_bt, res->pr_bh, reg);
+       case 4:
+               return bus_space_read_4(res->pr_bt, res->pr_bh, reg);
+       }
+       panic("unsupported access width %d\n", asz);
+
+       /* NEVER REACHED */
+       return 0;
+}
+
+static void
+acpi_pst_md_res_write(const struct acpi_pst_res *res, uint32_t val)
+{
+       int asz, reg;
+
+       KKASSERT(res->pr_res != NULL);
+       asz = acpi_pst_md_gas_asz(&res->pr_gas);
+       reg = res->pr_gas.BitOffset / NBBY;
+
+       switch (asz) {
+       case 1:
+               bus_space_write_1(res->pr_bt, res->pr_bh, reg, val);
+               break;
+       case 2:
+               bus_space_write_2(res->pr_bt, res->pr_bh, reg, val);
+               break;
+       case 4:
+               bus_space_write_4(res->pr_bt, res->pr_bh, reg, val);
+               break;
+       default:
+               panic("unsupported access width %d\n", asz);
+       }
+}
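The acpi_pst_md ops table returned by acpi_pst_md_probe() is driven by the generic P-state driver (acpi_cpu_pstate.c, not part of this diff); a rough sketch of the expected call order, assuming the caller has already parsed _PCT/_PSS into the two register resources and the pstates[] array:

/* Sketch only: ctrl/status/pstates come from the ACPI _PCT/_PSS parser. */
static void
pst_backend_example(const struct acpi_pst_res *ctrl,
    const struct acpi_pst_res *status,
    const struct acpi_pstate *pstates, int npstates)
{
	const struct acpi_pst_md *md;

	md = acpi_pst_md_probe();
	if (md == NULL)
		return;		/* no backend for this CPU vendor/family */

	if (md->pmd_check_csr(ctrl, status) != 0 ||
	    md->pmd_check_pstates(pstates, npstates) != 0)
		return;		/* hardware disagrees with the ACPI tables */

	if (md->pmd_init(ctrl, status) == 0)
		md->pmd_set_pstate(ctrl, status, &pstates[0]);
}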
diff --git a/sys/platform/pc64/acpica5/acpi_wakecode.S b/sys/platform/pc64/acpica5/acpi_wakecode.S
new file mode 100644
index 0000000..fbb6202
--- /dev/null
@@ -0,0 +1,239 @@
+/*-
+ * Copyright (c) 2001 Takanori Watanabe <takawata@jp.freebsd.org>
+ * Copyright (c) 2001 Mitsuru IWASAKI <iwasaki@jp.freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: src/sys/i386/acpica/acpi_wakecode.S,v 1.9 2004/01/01 22:57:22 njl Exp $
+ * $DragonFly: src/sys/platform/pc32/acpica5/acpi_wakecode.S,v 1.1 2004/02/21 06:48:05 dillon Exp $
+ */
+
+#define LOCORE
+
+#include <machine/asmacros.h>
+#include <machine/param.h>
+#include <machine/specialreg.h>
+
+       .align 4
+       .code16
+wakeup_16:
+       nop
+       cli
+
+       /*
+        * Set up segment registers for real mode and a small stack for
+        * any calls we make.
+        */
+       movw    %cs,%ax
+       movw    %ax,%ds
+       movw    %ax,%ss
+       movw    $PAGE_SIZE,%sp
+
+       /* Re-initialize video BIOS if the reset_video tunable is set. */
+       cmp     $0,reset_video
+       je      wakeup_16_gdt
+       lcall   $0xc000,$3
+
+       /*
+        * Set up segment registers for real mode again in case the
+        * previous BIOS call clobbers them.
+        */
+       movw    %cs,%ax
+       movw    %ax,%ds
+       movw    %ax,%ss
+
+wakeup_16_gdt:
+       /* Load GDT for real mode */
+       lgdt    physical_gdt
+
+       /* Restore CR2, CR3 and CR4 */
+       mov     previous_cr2,%eax
+       mov     %eax,%cr2
+       mov     previous_cr3,%eax
+       mov     %eax,%cr3
+       mov     previous_cr4,%eax
+       mov     %eax,%cr4
+
+       /* Transfer some values to protected mode */
+#define NVALUES        9
+#define TRANSFER_STACK32(val, idx)     \
+       mov     val,%eax;               \
+       mov     %eax,wakeup_32stack+(idx+1)+(idx*4);
+
+       TRANSFER_STACK32(previous_ss,           (NVALUES - 9))
+       TRANSFER_STACK32(previous_fs,           (NVALUES - 8))
+       TRANSFER_STACK32(previous_ds,           (NVALUES - 7))
+       TRANSFER_STACK32(physical_gdt+2,        (NVALUES - 6))
+       TRANSFER_STACK32(where_to_recover,      (NVALUES - 5))
+       TRANSFER_STACK32(previous_idt+2,        (NVALUES - 4))
+       TRANSFER_STACK32(previous_ldt,          (NVALUES - 3))
+       TRANSFER_STACK32(previous_gdt+2,        (NVALUES - 2))
+       TRANSFER_STACK32(previous_tr,           (NVALUES - 1))
+       TRANSFER_STACK32(previous_cr0,          (NVALUES - 0))
+
+       mov     physical_esp,%esi       /* to be used in 32bit code */
+
+       /* Enable protected mode */
+       mov     %cr0,%eax
+       orl     $(CR0_PE),%eax
+       mov     %eax,%cr0
+
+wakeup_sw32:
+       /* Switch to protected mode by intersegmental jump */
+       ljmpl   $0x8,$0x12345678        /* Code location, to be replaced */
+
+       .code32
+wakeup_32:
+       /*
+        * Switched to protected mode w/o paging
+        *      %esi:   KERNEL stack pointer (physical address)
+        */
+
+       nop
+
+       /* Set up segment registers for protected mode */
+       movw    $0x10,%ax               /* KDSEL to segment registers */
+       movw    %ax,%ds
+       movw    %ax,%es
+       movw    %ax,%gs
+       movw    %ax,%ss
+       movw    $0x18,%ax               /* KPSEL to %fs */
+       movw    %ax,%fs
+       movl    %esi,%esp               /* physical address stack pointer */
+
+wakeup_32stack:
+       /* Operands are overwritten in 16bit code */
+       pushl   $0xabcdef09             /* ss + dummy */
+       pushl   $0xabcdef08             /* fs + gs */
+       pushl   $0xabcdef07             /* ds + es */
+       pushl   $0xabcdef06             /* gdt:base (physical address) */
+       pushl   $0xabcdef05             /* recover address */
+       pushl   $0xabcdef04             /* idt:base */
+       pushl   $0xabcdef03             /* ldt + idt:limit */
+       pushl   $0xabcdef02             /* gdt:base */
+       pushl   $0xabcdef01             /* TR + gdt:limit */
+       pushl   $0xabcdef00             /* CR0 */
+
+       movl    %esp,%ebp
+#define CR0_REGISTER           0(%ebp)
+#define TASK_REGISTER          4(%ebp)
+#define PREVIOUS_GDT           6(%ebp)
+#define PREVIOUS_LDT           12(%ebp)
+#define PREVIOUS_IDT           14(%ebp)
+#define RECOVER_ADDR           20(%ebp)
+#define PHYSICAL_GDT_BASE      24(%ebp)
+#define PREVIOUS_DS            28(%ebp)
+#define PREVIOUS_ES            30(%ebp)
+#define PREVIOUS_FS            32(%ebp)
+#define PREVIOUS_GS            34(%ebp)
+#define PREVIOUS_SS            36(%ebp)
+
+       /* Fixup TSS type field */
+#define TSS_TYPEFIX_MASK       0xf9
+       xorl    %esi,%esi
+       movl    PHYSICAL_GDT_BASE,%ebx
+       movw    TASK_REGISTER,%si
+       leal    (%ebx,%esi),%eax        /* get TSS segment descriptor */
+       andb    $TSS_TYPEFIX_MASK,5(%eax)
+
+       /* Prepare to return to sleep/wakeup code point */
+       lgdt    PREVIOUS_GDT
+       lidt    PREVIOUS_IDT
+
+       xorl    %eax,%eax
+       movl    %eax,%ebx
+       movl    %eax,%ecx
+       movl    %eax,%edx
+       movl    %eax,%esi
+       movl    %eax,%edi
+       movl    PREVIOUS_DS,%ebx
+       movl    PREVIOUS_FS,%ecx
+       movl    PREVIOUS_SS,%edx
+       movw    TASK_REGISTER,%si
+       shll    $16,%esi
+       movw    PREVIOUS_LDT,%si
+       movl    RECOVER_ADDR,%edi
+
+       /* Enable paging and etc. */
+       movl    CR0_REGISTER,%eax
+       movl    %eax,%cr0
+
+       /* Flush the prefetch queue */
+       jmp     1f
+1:     jmp     1f
+1:
+       /*
+        * Now that we are in kernel virtual memory addressing
+        *      %ebx:   ds + es
+        *      %ecx:   fs + gs
+        *      %edx:   ss + dummy
+        *      %esi:   LDTR + TR
+        *      %edi:   recover address
+        */
+
+       nop
+
+       movl    %esi,%eax               /* LDTR + TR */
+       lldt    %ax                     /* load LDT register */
+       shrl    $16,%eax
+       ltr     %ax                     /* load task register */
+
+       /* Restore segment registers */
+       movl    %ebx,%eax               /* ds + es */
+       movw    %ax,%ds
+       shrl    $16,%eax
+       movw    %ax,%es
+       movl    %ecx,%eax               /* fs + gs */
+       movw    %ax,%fs
+       shrl    $16,%eax
+       movw    %ax,%gs
+       movl    %edx,%eax               /* ss */
+       movw    %ax,%ss
+
+       /* Jump to acpi_restorecpu() */
+       jmp     *%edi
+
+/* used in real mode */
+physical_gdt:          .word 0
+                       .long 0
+physical_esp:          .long 0
+previous_cr2:          .long 0
+previous_cr3:          .long 0
+previous_cr4:          .long 0
+reset_video:           .long 0
+
+/* transfer from real mode to protected mode */
+previous_cr0:          .long 0
+previous_tr:           .word 0
+previous_gdt:          .word 0
+                       .long 0
+previous_ldt:          .word 0
+previous_idt:          .word 0
+                       .long 0
+where_to_recover:      .long 0
+previous_ds:           .word 0
+previous_es:           .word 0
+previous_fs:           .word 0
+previous_gs:           .word 0
+previous_ss:           .word 0
+dummy:                 .word 0
diff --git a/sys/platform/pc64/acpica5/acpi_wakeup.c b/sys/platform/pc64/acpica5/acpi_wakeup.c
new file mode 100644
index 0000000..f83138e
--- /dev/null
@@ -0,0 +1,361 @@
+/*-
+ * Copyright (c) 2001 Takanori Watanabe <takawata@jp.freebsd.org>
+ * Copyright (c) 2001 Mitsuru IWASAKI <iwasaki@jp.freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: src/sys/i386/acpica/acpi_wakeup.c,v 1.33 2004/05/06 02:18:58 njl Exp $
+ * $DragonFly: src/sys/platform/pc32/acpica5/acpi_wakeup.c,v 1.15 2007/05/28 18:55:41 dillon Exp $
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/bus.h>
+#include <sys/lock.h>
+#include <sys/proc.h>
+#include <sys/sysctl.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
+#include <vm/vm_map.h>
+
+#include <machine/cpufunc.h>
+#include <machine/segments.h>
+#include <machine_base/isa/intr_machdep.h>
+
+#include "acpi.h"
+#include <dev/acpica5/acpivar.h>
+
+#include "acpi_wakecode.h"
+
+#if __FreeBSD_version < 500000
+#define        vm_page_lock_queues()
+#define        vm_page_unlock_queues()
+#endif
+
+extern uint32_t        acpi_reset_video;
+extern void    initializecpu(void);
+
+static struct region_descriptor        r_idt, r_gdt, *p_gdt;
+static uint16_t                r_ldt;
+
+static uint32_t                r_eax, r_ebx, r_ecx, r_edx, r_ebp, r_esi, r_edi,
+                       r_efl, r_cr0, r_cr2, r_cr3, r_cr4, ret_addr;
+
+static uint16_t                r_cs, r_ds, r_es, r_fs, r_gs, r_ss, r_tr;
+static uint32_t                r_esp;
+
+static void            acpi_printcpu(void);
+static void            acpi_realmodeinst(void *arg, bus_dma_segment_t *segs,
+                                         int nsegs, int error);
+static void            acpi_alloc_wakeup_handler(void);
+
+/* XXX shut gcc up */
+extern int             acpi_savecpu(void);
+extern int             acpi_restorecpu(void);
+
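+/*
+ * acpi_savecpu() stores the general registers, segment selectors, control
+ * registers, eflags, stack pointer, descriptor tables and its own return
+ * address, then returns 1.  acpi_restorecpu(), entered from the wake code,
+ * reloads the general registers, stack pointer and eflags and returns 0 to
+ * the saved return address, so the original acpi_savecpu() call appears to
+ * return a second time with a zero result.
+ */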
+#if defined(__GNUC__) || defined(__INTEL_COMPILER)
+__asm__("                              \n\
+       .text                           \n\
+       .p2align 2, 0x90                \n\
+       .type acpi_restorecpu, @function\n\
+acpi_restorecpu:                       \n\
+       .align 4                        \n\
+       movl    r_eax,%eax              \n\
+       movl    r_ebx,%ebx              \n\
+       movl    r_ecx,%ecx              \n\
+       movl    r_edx,%edx              \n\
+       movl    r_ebp,%ebp              \n\
+       movl    r_esi,%esi              \n\
+       movl    r_edi,%edi              \n\
+       movl    r_esp,%esp              \n\
+                                       \n\
+       pushl   r_efl                   \n\
+       popfl                           \n\
+                                       \n\
+       movl    ret_addr,%eax           \n\
+       movl    %eax,(%esp)             \n\
+       xorl    %eax,%eax               \n\
+       ret                             \n\
+                                       \n\
+       .text                           \n\
+       .p2align 2, 0x90                \n\
+       .type acpi_savecpu, @function   \n\
+acpi_savecpu:                          \n\
+       movw    %cs,r_cs                \n\
+       movw    %ds,r_ds                \n\
+       movw    %es,r_es                \n\
+       movw    %fs,r_fs                \n\
+       movw    %gs,r_gs                \n\
+       movw    %ss,r_ss                \n\
+                                       \n\
+       movl    %eax,r_eax              \n\
+       movl    %ebx,r_ebx              \n\
+       movl    %ecx,r_ecx              \n\
+       movl    %edx,r_edx              \n\
+       movl    %ebp,r_ebp              \n\
+       movl    %esi,r_esi              \n\
+       movl    %edi,r_edi              \n\
+                                       \n\
+       movl    %cr0,%eax               \n\
+       movl    %eax,r_cr0              \n\
+       movl    %cr2,%eax               \n\
+       movl    %eax,r_cr2              \n\
+       movl    %cr3,%eax               \n\
+       movl    %eax,r_cr3              \n\
+       movl    %cr4,%eax               \n\
+       movl    %eax,r_cr4              \n\
+                                       \n\
+       pushfl                          \n\
+       popl    r_efl                   \n\
+                                       \n\
+       movl    %esp,r_esp              \n\
+                                       \n\
+       sgdt    r_gdt                   \n\
+       sidt    r_idt                   \n\
+       sldt    r_ldt                   \n\
+       str     r_tr                    \n\
+                                       \n\
+       movl    (%esp),%eax             \n\
+       movl    %eax,ret_addr           \n\
+       movl    $1,%eax                 \n\
+       ret                             \n\
+");
+#endif /* __GNUC__ || __INTEL_COMPILER */
+
+static void
+acpi_printcpu(void)
+{
+       kprintf("======== acpi_printcpu() debug dump ========\n");
+       kprintf("gdt[%04x:%08x] idt[%04x:%08x] ldt[%04x] tr[%04x] efl[%08x]\n",
+               r_gdt.rd_limit, r_gdt.rd_base, r_idt.rd_limit, r_idt.rd_base,
+               r_ldt, r_tr, r_efl);
+       kprintf("eax[%08x] ebx[%08x] ecx[%08x] edx[%08x]\n",
+               r_eax, r_ebx, r_ecx, r_edx);
+       kprintf("esi[%08x] edi[%08x] ebp[%08x] esp[%08x]\n",
+               r_esi, r_edi, r_ebp, r_esp);
+       kprintf("cr0[%08x] cr2[%08x] cr3[%08x] cr4[%08x]\n",
+               r_cr0, r_cr2, r_cr3, r_cr4);
+       kprintf("cs[%04x] ds[%04x] es[%04x] fs[%04x] gs[%04x] ss[%04x]\n",
+               r_cs, r_ds, r_es, r_fs, r_gs, r_ss);
+}
+
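+/*
+ * Patch a value into the wake code image.  The offsets are the symbol
+ * offsets of acpi_wakecode.o as emitted into acpi_wakecode.h by
+ * genwakecode.sh (physical_esp, previous_cr3, where_to_recover, ...).
+ */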
+#define WAKECODE_FIXUP(offset, type, val) do   {               \
+       type    *addr;                                          \
+       addr = (type *)(sc->acpi_wakeaddr + offset);            \
+       *addr = val;                                            \
+} while (0)
+
+#define WAKECODE_BCOPY(offset, type, val) do   {               \
+       void    *addr;                                          \
+       addr = (void *)(sc->acpi_wakeaddr + offset);            \
+       bcopy(&(val), addr, sizeof(type));                      \
+} while (0)
+
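+/*
+ * Suspend the machine: identity-map the wake code page, snapshot the CPU
+ * context with acpi_savecpu(), patch that context into the wake code and
+ * ask ACPICA to enter the requested sleep state.  On resume the wake code
+ * restores the context and control returns here with acpi_savecpu()
+ * yielding 0, taking the wakeup branch below.
+ */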
+int
+acpi_sleep_machdep(struct acpi_softc *sc, int state)
+{
+       ACPI_STATUS             status;
+       vm_paddr_t              oldphys;
+       struct pmap             *pm;
+       vm_page_t               page;
+       static vm_page_t        opage = NULL;
+       int                     ret = 0;
+       int                     pteobj_allocated = 0;
+       uint32_t                cr3;
+       u_long                  ef;
+       struct proc             *p;
+
+       if (sc->acpi_wakeaddr == 0)
+               return (0);
+
+       AcpiSetFirmwareWakingVector(sc->acpi_wakephys);
+
+       ef = read_eflags();
+       ACPI_DISABLE_IRQS();
+
+       /* Create Identity Mapping */
+       if ((p = curproc) == NULL)
+               p = &proc0;
+       pm = vmspace_pmap(p->p_vmspace);
+       cr3 = rcr3();
+#ifdef PAE
+       load_cr3(vtophys(pm->pm_pdpt));
+#else
+       load_cr3(vtophys(pm->pm_pdir));
+#endif
+       /*
+        * note: DragonFly still uses the VM object (FreeBSD-5 no longer uses
+        * the VM object).
+        */
+       if (pm->pm_pteobj == NULL) {
+               pm->pm_pteobj = vm_object_allocate(OBJT_DEFAULT, PTDPTDI + 1);
+               pteobj_allocated = 1;
+       }
+
+       oldphys = pmap_extract(pm, sc->acpi_wakephys);
+       if (oldphys)
+               opage = PHYS_TO_VM_PAGE(oldphys);
+       page = PHYS_TO_VM_PAGE(sc->acpi_wakephys);
+       pmap_enter(pm, sc->acpi_wakephys, page,
+                  VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE, 1);
+
+       ret_addr = 0;
+       if (acpi_savecpu()) {
+               /* Execute Sleep */
+               cpu_disable_intr();
+
+               p_gdt = (struct region_descriptor *)
+                               (sc->acpi_wakeaddr + physical_gdt);
+               p_gdt->rd_limit = r_gdt.rd_limit;
+               p_gdt->rd_base = vtophys(r_gdt.rd_base);
+
+               WAKECODE_FIXUP(physical_esp, uint32_t, vtophys(r_esp));
+               WAKECODE_FIXUP(previous_cr0, uint32_t, r_cr0);
+               WAKECODE_FIXUP(previous_cr2, uint32_t, r_cr2);
+               WAKECODE_FIXUP(previous_cr3, uint32_t, r_cr3);
+               WAKECODE_FIXUP(previous_cr4, uint32_t, r_cr4);
+
+               WAKECODE_FIXUP(reset_video, uint32_t, acpi_reset_video);
+
+               WAKECODE_FIXUP(previous_tr,  uint16_t, r_tr);
+               WAKECODE_BCOPY(previous_gdt, struct region_descriptor, r_gdt);
+               WAKECODE_FIXUP(previous_ldt, uint16_t, r_ldt);
+               WAKECODE_BCOPY(previous_idt, struct region_descriptor, r_idt);
+
+               WAKECODE_FIXUP(where_to_recover, void *, acpi_restorecpu);
+
+               WAKECODE_FIXUP(previous_ds,  uint16_t, r_ds);
+               WAKECODE_FIXUP(previous_es,  uint16_t, r_es);
+               WAKECODE_FIXUP(previous_fs,  uint16_t, r_fs);
+               WAKECODE_FIXUP(previous_gs,  uint16_t, r_gs);
+               WAKECODE_FIXUP(previous_ss,  uint16_t, r_ss);
+
+               if (bootverbose)
+                       acpi_printcpu();
+
+               /* Call ACPICA to enter the desired sleep state */
+               if (state == ACPI_STATE_S4 && sc->acpi_s4bios)
+                       status = AcpiEnterSleepStateS4bios();
+               else
+                       status = AcpiEnterSleepState(state);
+
+               if (status != AE_OK) {
+                       device_printf(sc->acpi_dev,
+                               "AcpiEnterSleepState failed - %s\n",
+                               AcpiFormatException(status));
+                       ret = -1;
+                       goto out;
+               }
+
+               for (;;) ;
+       } else {
+               /* Execute Wakeup */
+#if 0
+               initializecpu();
+#endif
+               icu_reinit();
+               cpu_enable_intr();
+
+               if (bootverbose) {
+                       acpi_savecpu();
+                       acpi_printcpu();
+               }
+       }
+
+out:
+       vm_page_lock_queues();
+       pmap_remove(pm, sc->acpi_wakephys, sc->acpi_wakephys + PAGE_SIZE);
+       vm_page_unlock_queues();
+       if (opage) {
+               pmap_enter(pm, sc->acpi_wakephys, page,
+                          VM_PROT_READ | VM_PROT_WRITE, 0);
+       }
+
+       if (pteobj_allocated) {
+               vm_object_deallocate(pm->pm_pteobj);
+               pm->pm_pteobj = NULL;
+       }
+       load_cr3(cr3);
+
+       write_eflags(ef);
+
+       return (ret);
+}
+
+static bus_dma_tag_t   acpi_waketag;
+static bus_dmamap_t    acpi_wakemap;
+static vm_offset_t     acpi_wakeaddr = 0;
+
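+/*
+ * Reserve one DMA-able page of conventional memory (below 640KB) early in
+ * boot to hold the real-mode wake code; this must happen while still cold.
+ */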
+static void
+acpi_alloc_wakeup_handler(void)
+{
+       if (!cold)
+               return;
+
+       if (bus_dma_tag_create(/* parent */ NULL, /* alignment */ 2, 0,
+                              /* lowaddr below 1MB */ 0x9ffff,
+                              /* highaddr */ BUS_SPACE_MAXADDR, NULL, NULL,
+                               PAGE_SIZE, 1, PAGE_SIZE, 0, &acpi_waketag) != 0) {
+               kprintf("acpi_alloc_wakeup_handler: can't create wake tag\n");
+               return;
+       }
+
+       if (bus_dmamem_alloc(acpi_waketag, (void **)&acpi_wakeaddr,
+                            BUS_DMA_NOWAIT, &acpi_wakemap)) {
+               kprintf("acpi_alloc_wakeup_handler: can't alloc wake memory\n");
+               return;
+       }
+}
+
+SYSINIT(acpiwakeup, SI_BOOT1_POST, SI_ORDER_ANY, acpi_alloc_wakeup_handler, 0)
+
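+/*
+ * bus_dma load callback: patch the physical address of the 32-bit entry
+ * point into the wake code image (the operand at wakeup_sw32 + 2), copy
+ * the image into the low-memory page and record its physical address.
+ */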
+static void
+acpi_realmodeinst(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
+{
+       struct acpi_softc       *sc = arg;
+       uint32_t                *addr;
+
+       addr = (uint32_t *)&wakecode[wakeup_sw32 + 2];
+       *addr = segs[0].ds_addr + wakeup_32;
+       bcopy(wakecode, (void *)sc->acpi_wakeaddr, sizeof(wakecode));
+       sc->acpi_wakephys = segs[0].ds_addr;
+}
+
+void
+acpi_install_wakeup_handler(struct acpi_softc *sc)
+{
+       if (acpi_wakeaddr == 0)
+               return;
+
+       sc->acpi_waketag = acpi_waketag;
+       sc->acpi_wakeaddr = acpi_wakeaddr;
+       sc->acpi_wakemap = acpi_wakemap;
+
+       bus_dmamap_load(sc->acpi_waketag, sc->acpi_wakemap,
+                       (void *)sc->acpi_wakeaddr, PAGE_SIZE,
+                       acpi_realmodeinst, sc, 0);
+}
diff --git a/sys/platform/pc64/acpica5/genwakecode.sh b/sys/platform/pc64/acpica5/genwakecode.sh
new file mode 100644 (file)
index 0000000..970e5d7
--- /dev/null
@@ -0,0 +1,23 @@
+#!/bin/sh
+# $FreeBSD: src/sys/i386/acpica/genwakecode.sh,v 1.1 2002/05/01 21:52:34 peter Exp $
+# $DragonFly: src/sys/platform/pc32/acpica5/genwakecode.sh,v 1.1 2004/02/21 06:48:05 dillon Exp $
+#
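+# Emit the wake code as a C byte array named wakecode[], followed by one
+# #define per symbol in acpi_wakecode.o giving that symbol's offset into
+# the array.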
+echo "/* generated from `pwd`/acpi_wakecode.o */"
+echo 'static char wakecode[] = {';
+hexdump -Cv acpi_wakecode.bin | \
+    sed -e 's/^[0-9a-f][0-9a-f]*//' -e 's/\|.*$//' | \
+    while read line
+    do
+       for code in ${line}
+       do
+           echo -n "0x${code},";
+       done
+    done
+echo '};'
+
+nm -n acpi_wakecode.o | while read offset dummy what
+do
+    echo "#define ${what}      0x${offset}"
+done
+
+exit 0
diff --git a/sys/platform/pc64/acpica5/madt.c b/sys/platform/pc64/acpica5/madt.c
new file mode 100644 (file)
index 0000000..0738271
--- /dev/null
@@ -0,0 +1,778 @@
+/*-
+ * Copyright (c) 2003 John Baldwin <jhb@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: src/sys/i386/acpica/madt.c,v 1.17 2004/06/10 20:03:46 jhb Exp $
+ * $DragonFly: src/sys/platform/pc32/acpica5/madt.c,v 1.9 2007/04/30 07:18:55 dillon Exp $
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/globaldata.h>
+
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/pmap.h>
+
+#include <machine_base/apic/apicreg.h>
+#include <machine/frame.h>
+/*#include <machine/intr_machdep.h>*/
+#include <machine/md_var.h>
+#include <machine_base/apic/apicvar.h>
+#include <machine/specialreg.h>
+#include <machine/smp.h>
+#include <machine/globaldata.h>
+
+#include "acpi.h"
+#include "actables.h"
+#include "acpivar.h"
+#include <bus/pci/pcivar.h>
+
+#define        NIOAPICS                32      /* Max number of I/O APICs */
+#define        NLAPICS                 32      /* Max number of local APICs */
+
+typedef        void madt_entry_handler(APIC_HEADER *entry, void *arg);
+
+/* These two arrays are indexed by APIC IDs. */
+struct ioapic_info {
+       void *io_apic;
+       UINT32 io_vector;
+} ioapics[NIOAPICS];
+
+struct lapic_info {
+       u_int la_enabled:1;
+       u_int la_acpi_id:8;
+} lapics[NLAPICS];
+
+static int madt_found_sci_override;
+static MULTIPLE_APIC_TABLE *madt;
+static vm_paddr_t madt_physaddr;
+static vm_offset_t madt_length;
+
+MALLOC_DEFINE(M_MADT, "MADT Table", "ACPI MADT Table Items");
+
+static enum intr_polarity interrupt_polarity(UINT16 Polarity, UINT8 Source);
+static enum intr_trigger interrupt_trigger(UINT16 TriggerMode, UINT8 Source);
+static int     madt_find_cpu(u_int acpi_id, u_int *apic_id);
+static int     madt_find_interrupt(int intr, void **apic, u_int *pin);
+static void    *madt_map(vm_paddr_t pa, int offset, vm_offset_t length);
+static void    *madt_map_table(vm_paddr_t pa, int offset, const char *sig);
+static void    madt_parse_apics(APIC_HEADER *entry, void *arg);
+static void    madt_parse_interrupt_override(MADT_INTERRUPT_OVERRIDE *intr);
+static void    madt_parse_ints(APIC_HEADER *entry, void *arg __unused);
+static void    madt_parse_local_nmi(MADT_LOCAL_APIC_NMI *nmi);
+static void    madt_parse_nmi(MADT_NMI_SOURCE *nmi);
+static int     madt_probe(void);
+static int     madt_probe_cpus(void);
+static void    madt_probe_cpus_handler(APIC_HEADER *entry, void *arg __unused);
+static int     madt_probe_table(vm_paddr_t address);
+static void    madt_register(void *dummy);
+static int     madt_setup_local(void);
+static int     madt_setup_io(void);
+static void    madt_unmap(void *data, vm_offset_t length);
+static void    madt_unmap_table(void *table);
+static void    madt_walk_table(madt_entry_handler *handler, void *arg);
+
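+/*
+ * Callbacks handed to the generic APIC enumerator framework; registered
+ * by madt_register() below.
+ */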
+static struct apic_enumerator madt_enumerator = {
+       "MADT",
+       madt_probe,
+       madt_probe_cpus,
+       madt_setup_local,
+       madt_setup_io
+};
+
+/*
+ * Code to abuse the crashdump map to map in the tables for the early
+ * probe.  We cheat and make the following assumptions about how we
+ * use this KVA: page 0 is used to map in the first page of each table
+ * found via the RSDT or XSDT and pages 1 to n are used to map in the
+ * RSDT or XSDT.  The offset is in pages; the length is in bytes.
+ */
+static void *
+madt_map(vm_paddr_t pa, int offset, vm_offset_t length)
+{
+       vm_offset_t va, off;
+       void *data;
+
+       off = pa & PAGE_MASK;
+       length = roundup(length + off, PAGE_SIZE);
+       pa = pa & PG_FRAME;
+       va = (vm_offset_t)pmap_kenter_temporary(pa, offset) +
+           (offset * PAGE_SIZE);
+       data = (void *)(va + off);
+       length -= PAGE_SIZE;
+       while (length > 0) {
+               va += PAGE_SIZE;
+               pa += PAGE_SIZE;
+               length -= PAGE_SIZE;
+               pmap_kenter(va, pa);
+               cpu_invlpg((void *)va);
+       }
+       return (data);
+}
+
+static void
+madt_unmap(void *data, vm_offset_t length)
+{
+       vm_offset_t va, off;
+
+       va = (vm_offset_t)data;
+       off = va & PAGE_MASK;
+       length = roundup(length + off, PAGE_SIZE);
+       va &= ~PAGE_MASK;
+       while (length > 0) {
+               pmap_kremove(va);
+               cpu_invlpg((void *)va);
+               va += PAGE_SIZE;
+               length -= PAGE_SIZE;
+       }
+}
+
+static void *
+madt_map_table(vm_paddr_t pa, int offset, const char *sig)
+{
+       ACPI_TABLE_HEADER *header;
+       vm_offset_t length;
+       void *table;
+
+       header = madt_map(pa, offset, sizeof(ACPI_TABLE_HEADER));
+       if (strncmp(header->Signature, sig, 4) != 0) {
+               madt_unmap(header, sizeof(ACPI_TABLE_HEADER));
+               return (NULL);
+       }
+       length = header->Length;
+       madt_unmap(header, sizeof(ACPI_TABLE_HEADER));
+       table = madt_map(pa, offset, length);
+       if (ACPI_FAILURE(AcpiTbVerifyTableChecksum(table))) {
+               if (bootverbose)
+                       kprintf("MADT: Failed checksum for table %s\n", sig);
+               madt_unmap(table, length);
+               return (NULL);
+       }
+       return (table);
+}
+
+static void
+madt_unmap_table(void *table)
+{
+       ACPI_TABLE_HEADER *header;
+
+       header = (ACPI_TABLE_HEADER *)table;
+       madt_unmap(table, header->Length);
+}
+
+/*
+ * Look for an ACPI Multiple APIC Description Table ("APIC")
+ */
+static int
+madt_probe(void)
+{
+       ACPI_POINTER rsdp_ptr;
+       RSDP_DESCRIPTOR *rsdp;
+       RSDT_DESCRIPTOR *rsdt;
+       XSDT_DESCRIPTOR *xsdt;
+       int i, count;
+
+       if (resource_disabled("acpi", 0))
+               return (ENXIO);
+
+       /*
+        * Map in the RSDP.  Since ACPI uses AcpiOsMapMemory() which in turn
+        * calls pmap_mapdev() to find the RSDP, we assume that we can use
+        * pmap_mapdev() to map the RSDP.
+        */
+       if (AcpiOsGetRootPointer(ACPI_LOGICAL_ADDRESSING, &rsdp_ptr) != AE_OK)
+               return (ENXIO);
+       rsdp = pmap_mapdev(rsdp_ptr.Pointer.Physical, sizeof(RSDP_DESCRIPTOR));
+       if (rsdp == NULL) {
+               if (bootverbose)
+                       kprintf("MADT: Failed to map RSDP\n");
+               return (ENXIO);
+       }
+
+       /*
+        * For ACPI < 2.0, use the RSDT.  For ACPI >= 2.0, use the XSDT.
+        * We map the XSDT and RSDT at page 1 in the crashdump area.
+        * Page 0 is used to map in the headers of candidate ACPI tables.
+        */
+       if (rsdp->Revision >= 2) {
+               /*
+                * AcpiOsGetRootPointer only verifies the checksum for
+                * the version 1.0 portion of the RSDP.  Version 2.0 has
+                * an additional checksum that we verify first.
+                */
+               if (AcpiTbChecksum(rsdp, ACPI_RSDP_XCHECKSUM_LENGTH) != 0) {
+                       if (bootverbose)
+                               kprintf("MADT: RSDP failed extended checksum\n");
+                       return (ENXIO);
+               }
+               xsdt = madt_map_table(rsdp->XsdtPhysicalAddress, 1, XSDT_SIG);
+               if (xsdt == NULL) {
+                       if (bootverbose)
+                               kprintf("MADT: Failed to map XSDT\n");
+                       return (ENXIO);
+               }
+               count = (xsdt->Length - sizeof(ACPI_TABLE_HEADER)) /
+                   sizeof(UINT64);
+               for (i = 0; i < count; i++)
+                       if (madt_probe_table(xsdt->TableOffsetEntry[i]))
+                               break;
+               madt_unmap_table(xsdt);
+       } else {
+               rsdt = madt_map_table(rsdp->RsdtPhysicalAddress, 1, RSDT_SIG);
+               if (rsdt == NULL) {
+                       if (bootverbose)
+                               kprintf("MADT: Failed to map RSDT\n");
+                       return (ENXIO);
+               }
+               count = (rsdt->Length - sizeof(ACPI_TABLE_HEADER)) /
+                   sizeof(UINT32);
+               for (i = 0; i < count; i++)
+                       if (madt_probe_table(rsdt->TableOffsetEntry[i]))
+                               break;
+               madt_unmap_table(rsdt);
+       }
+       pmap_unmapdev((vm_offset_t)rsdp, sizeof(RSDP_DESCRIPTOR));
+       if (madt_physaddr == 0) {
+               if (bootverbose)
+                       kprintf("MADT: No MADT table found\n");
+               return (ENXIO);
+       }
+       if (bootverbose)
+               kprintf("MADT: Found table at 0x%jx\n",
+                   (uintmax_t)madt_physaddr);
+
+       /*
+        * Verify that we can map the full table and that its checksum is
+        * correct, etc.
+        */
+       madt = madt_map_table(madt_physaddr, 0, APIC_SIG);
+       if (madt == NULL)
+               return (ENXIO);
+       madt_unmap_table(madt);
+       madt = NULL;
+
+       return (0);
+}
+
+/*
+ * See if a given ACPI table is the MADT.
+ */
+static int
+madt_probe_table(vm_paddr_t address)
+{
+       ACPI_TABLE_HEADER *table;
+
+       table = madt_map(address, 0, sizeof(ACPI_TABLE_HEADER));
+       if (table == NULL) {
+               if (bootverbose)
+                       kprintf("MADT: Failed to map table at 0x%jx\n",
+                           (uintmax_t)address);
+               return (0);
+       }
+       if (bootverbose)
+               kprintf("Table '%.4s' at 0x%jx\n", table->Signature,
+                   (uintmax_t)address);
+
+       if (strncmp(table->Signature, APIC_SIG, 4) != 0) {
+               madt_unmap(table, sizeof(ACPI_TABLE_HEADER));
+               return (0);
+       }
+       madt_physaddr = address;
+       madt_length = table->Length;
+       madt_unmap(table, sizeof(ACPI_TABLE_HEADER));
+       return (1);
+}
+
+/*
+ * Run through the MP table enumerating CPUs.
+ */
+static int
+madt_probe_cpus(void)
+{
+
+       madt = madt_map_table(madt_physaddr, 0, APIC_SIG);
+       KASSERT(madt != NULL, ("Unable to re-map MADT"));
+       madt_walk_table(madt_probe_cpus_handler, NULL);
+       madt_unmap_table(madt);
+       madt = NULL;
+       return (0);
+}
+
+/*
+ * Initialize the local APIC on the BSP.
+ */
+static int
+madt_setup_local(void)
+{
+
+       madt = pmap_mapdev(madt_physaddr, madt_length);
+       lapic_init((uintptr_t)madt->LocalApicAddress);
+       kprintf("ACPI APIC Table: <%.*s %.*s>\n",
+           (int)sizeof(madt->OemId), madt->OemId,
+           (int)sizeof(madt->OemTableId), madt->OemTableId);
+
+       /*
+        * We ignore 64-bit local APIC override entries.  Should we
+        * perhaps emit a warning here if we find one?
+        */
+       return (0);
+}
+
+/*
+ * Enumerate I/O APICs and setup interrupt sources.
+ */
+static int
+madt_setup_io(void)
+{
+       void *ioapic;
+       u_int pin;
+       int i;
+
+       /* Try to initialize ACPI so that we can access the FADT. */
+       i = acpi_Startup();
+       if (ACPI_FAILURE(i)) {
+               kprintf("MADT: ACPI Startup failed with %s\n",
+                   AcpiFormatException(i));
+               kprintf("Try disabling either ACPI or apic support.\n");
+               panic("Using MADT but ACPI doesn't work");
+       }
+
+       /* First, we run through adding I/O APIC's. */
+       if (madt->PCATCompat)
+               ioapic_enable_mixed_mode();
+       madt_walk_table(madt_parse_apics, NULL);
+
+       /* Second, we run through the table tweaking interrupt sources. */
+       madt_walk_table(madt_parse_ints, NULL);
+
+       /*
+        * If there was not an explicit override entry for the SCI,
+        * force it to use level trigger and active-low polarity.
+        */
+       if (!madt_found_sci_override) {
+               if (madt_find_interrupt(AcpiGbl_FADT->SciInt, &ioapic, &pin)
+                   != 0)
+                       kprintf("MADT: Could not find APIC for SCI IRQ %d\n",
+                           AcpiGbl_FADT->SciInt);
+               else {
+                       kprintf(
+       "MADT: Forcing active-low polarity and level trigger for SCI\n");
+                       ioapic_set_polarity(ioapic, pin, INTR_POLARITY_LOW);
+                       ioapic_set_triggermode(ioapic, pin, INTR_TRIGGER_LEVEL);
+               }
+       }
+
+       /* Third, we register all the I/O APIC's. */
+       for (i = 0; i < NIOAPICS; i++)
+               if (ioapics[i].io_apic != NULL)
+                       ioapic_register(ioapics[i].io_apic);
+
+       /* Finally, we throw the switch to enable the I/O APIC's. */
+       acpi_SetDefaultIntrModel(ACPI_INTR_APIC);
+
+       return (0);
+}
+
+/*
+ * Register an enumerator that the SMP startup code might use
+ */
+static void
+madt_register(void *dummy __unused)
+{
+       apic_register_enumerator(&madt_enumerator);
+}
+SYSINIT(madt_register, SI_BOOT2_PRESMP, SI_ORDER_FIRST, madt_register, NULL)
+
+/*
+ * Call the handler routine for each entry in the MADT table.
+ */
+static void
+madt_walk_table(madt_entry_handler *handler, void *arg)
+{
+       APIC_HEADER *entry;
+       u_char *p, *end;
+
+       end = (u_char *)(madt) + madt->Length;
+       for (p = (u_char *)(madt + 1); p < end; ) {
+               entry = (APIC_HEADER *)p;
+               handler(entry, arg);
+               p += entry->Length;
+       }
+}
+
+static void
+madt_probe_cpus_handler(APIC_HEADER *entry, void *arg)
+{
+       MADT_PROCESSOR_APIC *proc;
+       struct lapic_info *la;
+
+       switch (entry->Type) {
+       case APIC_PROCESSOR:
+               /*
+                * The MADT does not include a BSP flag, so we have to
+                * let the MP code figure out which CPU is the BSP on
+                * its own.
+                */
+               proc = (MADT_PROCESSOR_APIC *)entry;
+               if (bootverbose)
+                       kprintf("MADT: Found CPU APIC ID %d ACPI ID %d: %s\n",
+                           proc->LocalApicId, proc->ProcessorId,
+                           proc->ProcessorEnabled ? "enabled" : "disabled");
+               if (!proc->ProcessorEnabled)
+                       break;
+               if (proc->LocalApicId >= NLAPICS)
+                       panic("%s: CPU ID %d too high", __func__,
+                           proc->LocalApicId);
+               la = &lapics[proc->LocalApicId];
+               KASSERT(la->la_enabled == 0,
+                   ("Duplicate local APIC ID %d", proc->LocalApicId));
+               la->la_enabled = 1;
+               la->la_acpi_id = proc->ProcessorId;
+               lapic_create(proc->LocalApicId, 0);
+               break;
+       }
+}
+
+/*
+ * Add an I/O APIC from an entry in the table.
+ */
+static void
+madt_parse_apics(APIC_HEADER *entry, void *arg __unused)
+{
+       MADT_IO_APIC *apic;
+
+       switch (entry->Type) {
+       case APIC_IO:
+               apic = (MADT_IO_APIC *)entry;
+               if (bootverbose)
+                       kprintf("MADT: Found IO APIC ID %d, Interrupt %d at %p\n",
+                           apic->IoApicId, apic->Interrupt,
+                           (void *)(uintptr_t)apic->Address);
+               if (apic->IoApicId >= NIOAPICS)
+                       panic("%s: I/O APIC ID %d too high", __func__,
+                           apic->IoApicId);
+               if (ioapics[apic->IoApicId].io_apic != NULL)
+                       panic("%s: Double APIC ID %d", __func__,
+                           apic->IoApicId);
+               ioapics[apic->IoApicId].io_apic = ioapic_create(
+                       (uintptr_t)apic->Address, apic->IoApicId,
+                           apic->Interrupt);
+               ioapics[apic->IoApicId].io_vector = apic->Interrupt;
+               break;
+       default:
+               break;
+       }
+}
+
+/*
+ * Determine properties of an interrupt source.  Note that for ACPI these
+ * functions are only used for ISA interrupts, so we assume ISA bus values
+ * (Active Hi, Edge Triggered) for conforming values except for the ACPI
+ * SCI for which we use Active Lo, Level Triggered.
+ */
+static enum intr_polarity
+interrupt_polarity(UINT16 Polarity, UINT8 Source)
+{
+
+       switch (Polarity) {
+       case POLARITY_CONFORMS:
+               if (Source == AcpiGbl_FADT->SciInt)
+                       return (INTR_POLARITY_LOW);
+               else
+                       return (INTR_POLARITY_HIGH);
+       case POLARITY_ACTIVE_HIGH:
+               return (INTR_POLARITY_HIGH);
+       case POLARITY_ACTIVE_LOW:
+               return (INTR_POLARITY_LOW);
+       default:
+               panic("Bogus Interrupt Polarity");
+       }
+}
+
+static enum intr_trigger
+interrupt_trigger(UINT16 TriggerMode, UINT8 Source)
+{
+
+       switch (TriggerMode) {
+       case TRIGGER_CONFORMS:
+               if (Source == AcpiGbl_FADT->SciInt)
+                       return (INTR_TRIGGER_LEVEL);
+               else
+                       return (INTR_TRIGGER_EDGE);
+       case TRIGGER_EDGE:
+               return (INTR_TRIGGER_EDGE);
+       case TRIGGER_LEVEL:
+               return (INTR_TRIGGER_LEVEL);
+       default:
+               panic("Bogus Interrupt Trigger Mode");
+       }
+}
+
+/*
+ * Find the local APIC ID associated with a given ACPI Processor ID.
+ */
+static int
+madt_find_cpu(u_int acpi_id, u_int *apic_id)
+{
+       int i;
+
+       for (i = 0; i < NLAPICS; i++) {
+               if (!lapics[i].la_enabled)
+                       continue;
+               if (lapics[i].la_acpi_id != acpi_id)
+                       continue;
+               *apic_id = i;
+               return (0);
+       }
+       return (ENOENT);
+}
+
+/*
+ * Find the IO APIC and pin on that APIC associated with a given global
+ * interrupt.
+ */
+static int
+madt_find_interrupt(int intr, void **apic, u_int *pin)
+{
+       int i, best;
+
+       best = -1;
+       for (i = 0; i < NIOAPICS; i++) {
+               if (ioapics[i].io_apic == NULL ||
+                   ioapics[i].io_vector > intr)
+                       continue;
+               if (best == -1 ||
+                   ioapics[best].io_vector < ioapics[i].io_vector)
+                       best = i;
+       }
+       if (best == -1)
+               return (ENOENT);
+       *apic = ioapics[best].io_apic;
+       *pin = intr - ioapics[best].io_vector;
+       if (*pin > 32)
+               kprintf("WARNING: Found intpin of %u for vector %d\n", *pin,
+                   intr);
+       return (0);
+}
+
+/*
+ * Parse an interrupt source override for an ISA interrupt.
+ */
+static void
+madt_parse_interrupt_override(MADT_INTERRUPT_OVERRIDE *intr)
+{
+       void *new_ioapic, *old_ioapic;
+       u_int new_pin, old_pin;
+       enum intr_trigger trig;
+       enum intr_polarity pol;
+       char buf[64];
+
+       if (bootverbose)
+               kprintf("MADT: intr override: source %u, irq %u\n",
+                   intr->Source, intr->Interrupt);
+       KASSERT(intr->Bus == 0, ("bus for interrupt overrides must be zero"));
+       if (madt_find_interrupt(intr->Interrupt, &new_ioapic,
+           &new_pin) != 0) {
+               kprintf("MADT: Could not find APIC for vector %d (IRQ %d)\n",
+                   intr->Interrupt, intr->Source);
+               return;
+       }
+
+       /*
+        * Lookup the appropriate trigger and polarity modes for this
+        * entry.
+        */
+       trig = interrupt_trigger(intr->TriggerMode, intr->Source);
+       pol = interrupt_polarity(intr->Polarity, intr->Source);
+
+       /*
+        * If this override is for the SCI, remember that we saw it and
+        * honor the hw.acpi.sci.trigger and hw.acpi.sci.polarity tunables
+        * if they are set.
+        */
+       if (intr->Source == AcpiGbl_FADT->SciInt) {
+               madt_found_sci_override = 1;
+               if (kgetenv_string("hw.acpi.sci.trigger", buf, sizeof(buf))) {
+                       if (tolower(buf[0]) == 'e')
+                               trig = INTR_TRIGGER_EDGE;
+                       else if (tolower(buf[0]) == 'l')
+                               trig = INTR_TRIGGER_LEVEL;
+                       else
+                               panic(
+                               "Invalid trigger %s: must be 'edge' or 'level'",
+                                   buf);
+                       kprintf("MADT: Forcing SCI to %s trigger\n",
+                           trig == INTR_TRIGGER_EDGE ? "edge" : "level");
+               }
+               if (kgetenv_string("hw.acpi.sci.polarity", buf, sizeof(buf))) {
+                       if (tolower(buf[0]) == 'h')
+                               pol = INTR_POLARITY_HIGH;
+                       else if (tolower(buf[0]) == 'l')
+                               pol = INTR_POLARITY_LOW;
+                       else
+                               panic(
+                               "Invalid polarity %s: must be 'high' or 'low'",
+                                   buf);
+                       kprintf("MADT: Forcing SCI to active %s polarity\n",
+                           pol == INTR_POLARITY_HIGH ? "high" : "low");
+               }
+       }
+
+       /* Remap the IRQ if it is mapped to a different interrupt vector. */
+       if (intr->Source != intr->Interrupt) {
+               /*
+                * If the SCI is remapped to a non-ISA global interrupt,
+                * then override the vector we use to setup and allocate
+                * the interrupt.
+                */
+               if (intr->Interrupt > 15 &&
+                   intr->Source == AcpiGbl_FADT->SciInt)
+                       acpi_OverrideInterruptLevel(intr->Interrupt);
+               else
+                       ioapic_remap_vector(new_ioapic, new_pin, intr->Source);
+               if (madt_find_interrupt(intr->Source, &old_ioapic,
+                   &old_pin) != 0)
+                       kprintf("MADT: Could not find APIC for source IRQ %d\n",
+                           intr->Source);
+               else if (ioapic_get_vector(old_ioapic, old_pin) ==
+                   intr->Source)
+                       ioapic_disable_pin(old_ioapic, old_pin);
+       }
+
+       /* Program the polarity and trigger mode. */
+       ioapic_set_triggermode(new_ioapic, new_pin, trig);
+       ioapic_set_polarity(new_ioapic, new_pin, pol);
+}
+
+/*
+ * Parse an entry for an NMI routed to an IO APIC.
+ */
+static void
+madt_parse_nmi(MADT_NMI_SOURCE *nmi)
+{
+       void *ioapic;
+       u_int pin;
+
+       if (madt_find_interrupt(nmi->Interrupt, &ioapic, &pin) != 0) {
+               kprintf("MADT: Could not find APIC for vector %d\n",
+                   nmi->Interrupt);
+               return;
+       }
+
+       ioapic_set_nmi(ioapic, pin);
+       if (nmi->TriggerMode != TRIGGER_CONFORMS)
+               ioapic_set_triggermode(ioapic, pin,
+                   interrupt_trigger(nmi->TriggerMode, 0));
+       if (nmi->Polarity != TRIGGER_CONFORMS)
+               ioapic_set_polarity(ioapic, pin,
+                   interrupt_polarity(nmi->Polarity, 0));
+}
+
+/*
+ * Parse an entry for an NMI routed to a local APIC LVT pin.
+ */
+static void
+madt_parse_local_nmi(MADT_LOCAL_APIC_NMI *nmi)
+{
+       u_int apic_id, pin;
+
+       if (nmi->ProcessorId == 0xff)
+               apic_id = APIC_ID_ALL;
+       else if (madt_find_cpu(nmi->ProcessorId, &apic_id) != 0) {
+               if (bootverbose)
+                       kprintf("MADT: Ignoring local NMI routed to ACPI CPU %u\n",
+                           nmi->ProcessorId);
+               return;
+       }
+       if (nmi->Lint == 0)
+               pin = LVT_LINT0;
+       else
+               pin = LVT_LINT1;
+       lapic_set_lvt_mode(apic_id, pin, APIC_LVT_DM_NMI);
+       if (nmi->TriggerMode != TRIGGER_CONFORMS)
+               lapic_set_lvt_triggermode(apic_id, pin,
+                   interrupt_trigger(nmi->TriggerMode, 0));
+       if (nmi->Polarity != POLARITY_CONFORMS)
+               lapic_set_lvt_polarity(apic_id, pin,
+                   interrupt_polarity(nmi->Polarity, 0));
+}
+
+/*
+ * Parse interrupt entries.
+ */
+static void
+madt_parse_ints(APIC_HEADER *entry, void *arg __unused)
+{
+
+       switch (entry->Type) {
+       case APIC_XRUPT_OVERRIDE:
+               madt_parse_interrupt_override(
+                       (MADT_INTERRUPT_OVERRIDE *)entry);
+               break;
+       case APIC_NMI:
+               madt_parse_nmi((MADT_NMI_SOURCE *)entry);
+               break;
+       case APIC_LOCAL_NMI:
+               madt_parse_local_nmi((MADT_LOCAL_APIC_NMI *)entry);
+               break;
+       }
+}
+
+/*
+ * Setup per-CPU ACPI IDs.  This is done as part of the high-level BIOS
+ * setup (after SMP), but before MACHDEP systems are initialized.
+ */
+static void
+madt_set_ids(void *dummy)
+{
+       struct lapic_info *la;
+       struct mdglobaldata *md;
+       u_int i;
+
+       if (madt == NULL)
+               return;
+       for (i = 0; i < ncpus; i++) {
+               if ((smp_active_mask & (1 << i)) == 0)
+                       continue;
+               md = (struct mdglobaldata *)globaldata_find(i);
+               KKASSERT(md != NULL);
+               la = &lapics[md->gd_apic_id];
+               if (!la->la_enabled)
+                       panic("APIC: CPU with APIC ID %u is not enabled",
+                           md->gd_apic_id);
+               md->gd_acpi_id = la->la_acpi_id;
+               if (bootverbose)
+                       kprintf("APIC: CPU %u has ACPI ID %u\n", i,
+                           la->la_acpi_id);
+       }
+}
+SYSINIT(madt_set_ids, SI_BOOT2_BIOS, SI_ORDER_FIRST, madt_set_ids, NULL)
diff --git a/sys/platform/pc64/amd64/est.c b/sys/platform/pc64/amd64/est.c
new file mode 100644 (file)
index 0000000..0bd2055
--- /dev/null
@@ -0,0 +1,773 @@
+/*     $NetBSD: est.c,v 1.25 2006/06/18 16:39:56 nonaka Exp $  */
+/*
+ * Copyright (c) 2003 Michael Eriksson.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/*-
+ * Copyright (c) 2004 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *        This product includes software developed by the NetBSD
+ *        Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This is a driver for Intel's Enhanced SpeedStep Technology (EST),
+ * as implemented in Pentium M processors.
+ *
+ * Reference documentation:
+ *
+ * - IA-32 Intel Architecture Software Developer's Manual, Volume 3:
+ *   System Programming Guide.
+ *   Section 13.14, Enhanced Intel SpeedStep technology.
+ *   Table B-2, MSRs in Pentium M Processors.
+ *   http://www.intel.com/design/pentium4/manuals/253668.htm
+ *
+ * - Intel Pentium M Processor Datasheet.
+ *   Table 5, Voltage and Current Specifications.
+ *   http://www.intel.com/design/mobile/datashts/252612.htm
+ *
+ * - Intel Pentium M Processor on 90 nm Process with 2-MB L2 Cache Datasheet
+ *   Table 3-4, 3-5, 3-6, Voltage and Current Specifications.
+ *   http://www.intel.com/design/mobile/datashts/302189.htm
+ *
+ * - Linux cpufreq patches, speedstep-centrino.c.
+ *   Encoding of MSR_PERF_CTL and MSR_PERF_STATUS.
+ *   http://www.codemonkey.org.uk/projects/cpufreq/cpufreq-2.4.22-pre6-1.gz
+ *
+ *   ACPI objects: _PCT is MSR location, _PSS is freq/voltage, _PPC is caps.
+ *
+ * $NetBSD: est.c,v 1.25 2006/06/18 16:39:56 nonaka Exp $
+ * $DragonFly: src/sys/platform/pc32/i386/est.c,v 1.11 2008/06/05 18:06:32 swildner Exp $
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/sysctl.h>
+
+#include <machine/cpu.h>
+#include <machine/md_var.h>
+#include <machine/specialreg.h>
+
+
+struct fq_info {
+       int mhz;
+       int mv;
+};
+
+/* Ultra Low Voltage Intel Pentium M processor 900 MHz */
+static const struct fq_info pentium_m_900[] = {
+       {  900, 1004 },
+       {  800,  988 },
+       {  600,  844 },
+};
+
+/* Ultra Low Voltage Intel Pentium M processor 1.00 GHz */
+static const struct fq_info pentium_m_1000[] = {
+       { 1000, 1004 },
+       {  900,  988 },
+       {  800,  972 },
+       {  600,  844 },
+};
+
+/* Low Voltage Intel Pentium M processor 1.10 GHz */
+static const struct fq_info pentium_m_1100[] = {
+       { 1100, 1180 },
+       { 1000, 1164 },
+       {  900, 1100 },
+       {  800, 1020 },
+       {  600,  956 },
+};
+
+/* Low Voltage Intel Pentium M processor 1.20 GHz */
+static const struct fq_info pentium_m_1200[] = {
+       { 1200, 1180 },
+       { 1100, 1164 },
+       { 1000, 1100 },
+       {  900, 1020 },
+       {  800, 1004 },
+       {  600,  956 },
+};
+
+/* Low Voltage Intel Pentium M processor 1.30 GHz */
+static const struct fq_info pentium_m_1300_lv[] = {
+       { 1300, 1180 },
+       { 1200, 1164 },
+       { 1100, 1100 },
+       { 1000, 1020 },
+       {  900, 1004 },
+       {  800,  988 },
+       {  600,  956 },
+};
+
+/* Intel Pentium M processor 1.30 GHz */
+static const struct fq_info pentium_m_1300[] = {
+       { 1300, 1388 },
+       { 1200, 1356 },
+       { 1000, 1292 },
+       {  800, 1260 },
+       {  600,  956 },
+};
+
+/* Intel Pentium M processor 1.40 GHz */
+static const struct fq_info pentium_m_1400[] = {
+       { 1400, 1484 },
+       { 1200, 1436 },
+       { 1000, 1308 },
+       {  800, 1180 },
+       {  600,  956 }
+};
+
+/* Intel Pentium M processor 1.50 GHz */
+static const struct fq_info pentium_m_1500[] = {
+       { 1500, 1484 },
+       { 1400, 1452 },
+       { 1200, 1356 },
+       { 1000, 1228 },
+       {  800, 1116 },
+       {  600,  956 }
+};
+
+/* Intel Pentium M processor 1.60 GHz */
+static const struct fq_info pentium_m_1600[] = {
+       { 1600, 1484 },
+       { 1400, 1420 },
+       { 1200, 1276 },
+       { 1000, 1164 },
+       {  800, 1036 },
+       {  600,  956 }
+};
+
+/* Intel Pentium M processor 1.70 GHz */
+static const struct fq_info pentium_m_1700[] = {
+       { 1700, 1484 },
+       { 1400, 1308 },
+       { 1200, 1228 },
+       { 1000, 1116 },
+       {  800, 1004 },
+       {  600,  956 }
+};
+
+/* Intel Pentium M processor 723 Ultra Low Voltage 1.0 GHz */
+static const struct fq_info pentium_m_n723[] = {
+       { 1000,  940 },
+       {  900,  908 },
+       {  800,  876 },
+       {  600,  812 }
+};
+
+/* Intel Pentium M processor 733 Ultra Low Voltage 1.1 GHz */
+static const struct fq_info pentium_m_n733[] = {
+       { 1100,  940 },
+       { 1000,  924 },
+       {  900,  892 },
+       {  800,  876 },
+       {  600,  812 }
+};
+
+/* Intel Pentium M processor 753 Ultra Low Voltage 1.2 GHz */
+static const struct fq_info pentium_m_n753[] = {
+       { 1200,  940 },
+       { 1100,  924 },
+       { 1000,  908 },
+       {  900,  876 },
+       {  800,  860 },
+       {  600,  812 }
+};
+
+/* Intel Pentium M processor 773 Ultra Low Voltage 1.3 GHz */
+static const struct fq_info pentium_m_n773[] = {
+       { 1300,  940 },
+       { 1200,  924 },
+       { 1100,  908 },
+       { 1000,  892 },
+       {  900,  876 },
+       {  800,  860 },
+       {  600,  812 }
+};
+
+/* Intel Pentium M processor 738 Low Voltage 1.4 GHz */
+static const struct fq_info pentium_m_n738[] = {
+       { 1400, 1116 },
+       { 1300, 1116 },
+       { 1200, 1100 },
+       { 1100, 1068 },
+       { 1000, 1052 },
+       {  900, 1036 },
+       {  800, 1020 },
+       {  600,  988 }
+};
+
+/* Intel Pentium M processor 758 Low Voltage 1.5 GHz */
+static const struct fq_info pentium_m_n758[] = {
+       { 1500, 1116 },
+       { 1400, 1116 },
+       { 1300, 1100 },
+       { 1200, 1084 },
+       { 1100, 1068 },
+       { 1000, 1052 },
+       {  900, 1036 },
+       {  800, 1020 },
+       {  600,  988 }
+};
+
+/* Intel Pentium M processor 778 Low Voltage 1.6 GHz */
+static const struct fq_info pentium_m_n778[] = {
+       { 1600, 1116 },
+       { 1500, 1116 },
+       { 1400, 1100 },
+       { 1300, 1184 },
+       { 1200, 1068 },
+       { 1100, 1052 },
+       { 1000, 1052 },
+       {  900, 1036 },
+       {  800, 1020 },
+       {  600,  988 }
+};
+
+/* Intel Pentium M processor 710 1.4 GHz */
+static const struct fq_info pentium_m_n710[] = {
+       { 1400, 1340 },
+       { 1200, 1228 },
+       { 1000, 1148 },
+       {  800, 1068 },
+       {  600,  998 }
+};
+
+/* Intel Pentium M processor 715 1.5 GHz */
+static const struct fq_info pentium_m_n715[] = {
+       { 1500, 1340 },
+       { 1200, 1228 },
+       { 1000, 1148 },
+       {  800, 1068 },
+       {  600,  988 }
+};
+
+/* Intel Pentium M processor 725 1.6 GHz */
+static const struct fq_info pentium_m_n725[] = {
+       { 1600, 1340 },
+       { 1400, 1276 },
+       { 1200, 1212 },
+       { 1000, 1132 },
+       {  800, 1068 },
+       {  600,  988 }
+};
+
+/* Intel Pentium M processor 730 1.6 GHz */
+static const struct fq_info pentium_m_n730[] = {
+       { 1600, 1308 },
+       { 1333, 1260 },
+       { 1200, 1212 },
+       { 1067, 1180 },
+       {  800,  988 }
+};
+
+/* Intel Pentium M processor 735 1.7 GHz */
+static const struct fq_info pentium_m_n735[] = {
+       { 1700, 1340 },
+       { 1400, 1244 },
+       { 1200, 1180 },
+       { 1000, 1116 },
+       {  800, 1052 },
+       {  600,  988 }
+};
+
+/* Intel Pentium M processor 740 1.73 GHz */
+static const struct fq_info pentium_m_n740[] = {
+       { 1733, 1356 },
+       { 1333, 1212 },
+       { 1067, 1100 },
+       {  800,  988 },
+};
+
+/* Intel Pentium M processor 740 1.73 GHz (988-1308mV version?) */
+static const struct fq_info pentium_m_n740_2[] = {
+       { 1733, 1308 },
+       { 1333, 1148 },
+       { 1067, 1068 },
+       {  800,  988 }
+};
+
+/* Intel Pentium M processor 745 1.8 GHz */
+static const struct fq_info pentium_m_n745[] = {
+       { 1800, 1340 },
+       { 1600, 1292 },
+       { 1400, 1228 },
+       { 1200, 1164 },
+       { 1000, 1116 },
+       {  800, 1052 },
+       {  600,  988 }
+};
+
+/* Intel Pentium M processor 750 1.86 GHz */
+/* values extracted from \_PR\NPSS (via _PSS) SSDT ACPI table */
+static const struct fq_info pentium_m_n750[] = {
+       { 1867, 1308 },
+       { 1600, 1228 },
+       { 1333, 1148 },
+       { 1067, 1068 },
+       {  800,  988 }
+};
+
+static const struct fq_info pentium_m_n750_2[] = {
+       { 1867, 1356 },
+       { 1600, 1228 },
+       { 1333, 1148 },
+       { 1067, 1068 },
+       {  800,  988 }
+};
+
+/* Intel Pentium M processor 755 2.0 GHz */
+static const struct fq_info pentium_m_n755[] = {
+       { 2000, 1340 },
+       { 1800, 1292 },
+       { 1600, 1244 },
+       { 1400, 1196 },
+       { 1200, 1148 },
+       { 1000, 1100 },
+       {  800, 1052 },
+       {  600,  988 }
+};
+
+/* Intel Pentium M processor 760 2.0 GHz */
+static const struct fq_info pentium_m_n760[] = {
+       { 2000, 1356 },
+       { 1600, 1244 },
+       { 1333, 1164 },
+       { 1067, 1084 },
+       {  800,  988 }
+};
+
+/* Intel Pentium M processor 760 2.0 GHz */
+static const struct fq_info pentium_m_n760_2[] = {
+       { 2000, 1308 },
+       { 1600, 1244 },
+       { 1333, 1164 },
+       { 1067, 1084 },
+       {  800,  988 }
+};
+
+/* Intel Pentium M processor 765 2.1 GHz */
+static const struct fq_info pentium_m_n765[] = {
+       { 2100, 1340 },
+       { 1800, 1276 },
+       { 1600, 1228 },
+       { 1400, 1180 },
+       { 1200, 1132 },
+       { 1000, 1084 },
+       {  800, 1036 },
+       {  600,  988 }
+};
+
+/* Intel Pentium M processor 770 2.13 GHz */
+static const struct fq_info pentium_m_n770[] = {
+       { 2133, 1551 },
+       { 1800, 1429 },
+       { 1600, 1356 },
+       { 1400, 1180 },
+       { 1200, 1132 },
+       { 1000, 1084 },
+       {  800, 1036 },
+       {  600,  988 }
+};
+
+/* Intel Pentium M processor 770 2.13 GHz */
+static const struct fq_info pentium_m_n770_2[] = {
+       { 2133, 1356 },
+       { 1867, 1292 },
+       { 1600, 1212 },
+       { 1333, 1148 },
+       { 1067, 1068 },
+       {  800,  988 }
+};
+
+/* Intel Core Duo T2300 1.66 GHz */
+static const struct fq_info pentium_core_duo_t2300[] = {
+       { 1666, 1404 },
+       { 1500, 1404 },
+       { 1333, 1404 },
+       { 1167, 1404 },
+       { 1000, 1004 },
+       {  667, 1004 },
+       {  333, 1004 },
+       {  167, 1004 },
+};
+
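+/* Intel Core 2 Duo T7500 2.20 GHz */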
+static const struct fq_info pentium_core2_duo_t7500[] = {
+       { 2200, 1420 },
+       { 1600, 1212 },
+       { 1200, 1068 },
+       {  800,  988 },
+};
+
+struct fqlist {
+       const char *brand_tag;
+       const u_int cpu_id;
+       size_t tablec;
+       const struct fq_info *table;
+       const int fsbmult;      /* FSB in units of 100/3 MHz (3=100MHz, 4=133MHz) */
+};
+
+#define ENTRY(s, i, v, f)      { s, i, sizeof(v) / sizeof((v)[0]), v, f }
+static const struct fqlist pentium_m[] = { /* Banias */
+       ENTRY(" 900", 0x0695, pentium_m_900,  3),
+       ENTRY("1000", 0x0695, pentium_m_1000, 3),
+       ENTRY("1100", 0x0695, pentium_m_1100, 3),
+       ENTRY("1200", 0x0695, pentium_m_1200, 3),
+       ENTRY("1300", 0x0695, pentium_m_1300, 3),
+       ENTRY("1300", 0x0695, pentium_m_1300_lv, 3),
+       ENTRY("1400", 0x0695, pentium_m_1400, 3),
+       ENTRY("1500", 0x0695, pentium_m_1500, 3),
+       ENTRY("1600", 0x0695, pentium_m_1600, 3),
+       ENTRY("1700", 0x0695, pentium_m_1700, 3),
+};
+
+static const struct fqlist pentium_m_dothan[] = {
+
+       /* ultra low voltage CPUs */
+       ENTRY("1.00", 0x06d8, pentium_m_n723, 3),
+       ENTRY("1.10", 0x06d6, pentium_m_n733, 3),
+       ENTRY("1.20", 0x06d8, pentium_m_n753, 3),
+       ENTRY("1.30", 0, pentium_m_n773, 3), /* does this exist? */
+
+       /* low voltage CPUs */
+       ENTRY("1.40", 0x06d6, pentium_m_n738, 3),
+       ENTRY("1.50", 0x06d8, pentium_m_n758, 3),
+       ENTRY("1.60", 0x06d8, pentium_m_n778, 3),
+
+       /* 'regular' 400 MHz FSB CPUs */
+       ENTRY("1.40", 0x06d6, pentium_m_n710, 3),
+       ENTRY("1.50", 0x06d6, pentium_m_n715, 3),
+       ENTRY("1.50", 0x06d8, pentium_m_n715, 3),
+       ENTRY("1.60", 0x06d6, pentium_m_n725, 3),
+       ENTRY("1.70", 0x06d6, pentium_m_n735, 3),
+       ENTRY("1.80", 0x06d6, pentium_m_n745, 3),
+       ENTRY("2.00", 0x06d6, pentium_m_n755, 3),
+       ENTRY("2.10", 0x06d6, pentium_m_n765, 3),
+
+       /* 533 MHz FSB CPUs */
+       ENTRY("1.60", 0x06d8, pentium_m_n730, 4),
+       ENTRY("1.73", 0x06d8, pentium_m_n740, 4),
+       ENTRY("1.73", 0x06d8, pentium_m_n740_2, 4),
+       ENTRY("1.86", 0x06d8, pentium_m_n750, 4),
+       ENTRY("1.86", 0x06d8, pentium_m_n750_2, 4),
+       ENTRY("2.00", 0x06d8, pentium_m_n760, 4),
+       ENTRY("2.00", 0x06d8, pentium_m_n760_2, 4),
+       ENTRY("2.13", 0x06d8, pentium_m_n770, 4),
+       ENTRY("2.13", 0x06d8, pentium_m_n770_2, 4),
+};
+
+static const struct fqlist pentium_yonah[] = {
+
+       /* 666 MHz FSB CPUs */
+       ENTRY("1.66", 0x06e8, pentium_core_duo_t2300, 5),
+};
+
+static const struct fqlist pentium_merom[] = {
+
+       /* 800 MHz FSB CPUs */
+       ENTRY("2.20", 0x06fa, pentium_core2_duo_t7500, 6),
+};
+
+#undef ENTRY
+
+struct est_cpu {
+       const char *brand_prefix;
+       const char *brand_suffix;
+       size_t listc;
+       const struct fqlist *list;
+};
+
+static const struct est_cpu est_cpus[] = {
+       {
+               "Intel(R) Pentium(R) M processor ", "MHz",
+               (sizeof(pentium_m) / sizeof(pentium_m[0])),
+               pentium_m
+       },
+       {
+               "Intel(R) Pentium(R) M processor ", "GHz",
+               (sizeof(pentium_m_dothan) / sizeof(pentium_m_dothan[0])),
+               pentium_m_dothan
+       },
+       {
+               "Genuine Intel(R) CPU           T2300  @ ", "GHz",
+               (sizeof(pentium_yonah) / sizeof(pentium_yonah[0])),
+               pentium_yonah
+       },
+       {
+               "Intel(R) Core(TM)2 Duo CPU     T7500  @ ", "GHz",
+               (sizeof(pentium_merom) / sizeof(pentium_merom[0])),
+               pentium_merom
+       },
+};
+
+#define NESTCPUS  (sizeof(est_cpus) / sizeof(est_cpus[0]))
+
+#define MSR2MV(msr)    (((int) (msr) & 0xff) * 16 + 700)
+#define MSR2MHZ(msr)   (((((int) (msr) >> 8) & 0xff) * 100 * fsbmult + 1)/ 3)
+#define MV2MSR(mv)     ((((int) (mv) - 700) >> 4) & 0xff)
+#define MHZ2MSR(mhz)   (((3 * (mhz + 30) / (100 * fsbmult)) & 0xff) << 8)
+/* XXX 30 is slop to deal with the 33.333 MHz roundoff values */
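+/*
+ * Illustrative example (not from the original source): with fsbmult = 3
+ * and a PERF_STATUS/PERF_CTL value whose voltage field is 0x0c and whose
+ * bus-ratio field is 0x0c, MSR2MV gives 12 * 16 + 700 = 892 mV and
+ * MSR2MHZ gives (12 * 100 * 3 + 1) / 3 = 1200 MHz; MHZ2MSR(1200) yields
+ * 0x0c00, i.e. the ratio encoded back into bits 8-15.
+ */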
+
+/*
+ * Names and numbers from IA-32 System Programming Guide
+ * (not found in <machine/specialreg.h>)
+ */
+#define MSR_PERF_STATUS                0x198
+#define MSR_PERF_CTL           0x199
+
+static const struct fqlist *est_fqlist;        /* not NULL if functional */
+static int     fsbmult;
+
+static const char est_desc[] = "Enhanced SpeedStep";
+
+static char freqs_available[80];
+
+static int
+est_sysctl_helper(SYSCTL_HANDLER_ARGS)
+{
+       uint64_t msr;
+       int      fq, oldfq, err = 0;
+       int      i;
+
+       if (est_fqlist == NULL)
+               return (EOPNOTSUPP);
+
+       oldfq = MSR2MHZ(rdmsr(MSR_PERF_CTL));
+
+       if (req->newptr != NULL) {
+               err = SYSCTL_IN(req, &fq, sizeof(fq));
+               if (err)
+                       return err;
+
+               if (fq != oldfq) {
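+                       /*
+                        * Walk the table from the end and take the first
+                        * entry running at or above the requested frequency;
+                        * fall back to table[0] if none qualifies.
+                        */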
+                       for (i = est_fqlist->tablec - 1; i > 0; i--) {
+                               if (est_fqlist->table[i].mhz >= fq)
+                                       break;
+                       }
+                       fq = est_fqlist->table[i].mhz;
+                       msr = (rdmsr(MSR_PERF_CTL) & ~0xffffULL) |
+                           MV2MSR(est_fqlist->table[i].mv) |
+                           MHZ2MSR(est_fqlist->table[i].mhz);
+                       wrmsr(MSR_PERF_CTL, msr);
+               }
+       } else {
+               err = SYSCTL_OUT(req, &oldfq, sizeof(oldfq));
+       }
+
+       return err;
+}
+
+/*
+ * Look for a CPU matching hw.model
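+ * Matching compares the brand prefix and suffix from the est_cpus table,
+ * the frequency tag embedded in the model string, the CPUID signature
+ * (a cpu_id of 0 acts as a wildcard) and finally the current operating
+ * voltage, since CPUID plus brand string alone are not unique.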
+ */
+static const struct fqlist *
+findcpu(const char *hwmodel, int mv)
+{
+       const struct est_cpu    *ccpu;
+       const struct fqlist     *fql;
+       const char              *tag;
+       size_t                  len;
+       size_t                  i;
+       int k;
+
+       for (ccpu = est_cpus; ccpu < est_cpus + NESTCPUS; ++ccpu) {
+               len = strlen(ccpu->brand_prefix);
+               if (strncmp(ccpu->brand_prefix, hwmodel, len) != 0)
+                       continue;
+               tag = hwmodel + len;
+               for (i = 0; i < ccpu->listc; i++) {
+                       fql = &ccpu->list[i];
+                       len = strlen(fql->brand_tag);
+                       if (strncmp(fql->brand_tag, tag, len) != 0 ||
+                           strcmp(ccpu->brand_suffix, tag + len))
+                               continue;
+
+                       if (fql->cpu_id == 0 || fql->cpu_id == cpu_id) {
+                               /* verify operating point is in table, because
+                                  CPUID + brand_tag still isn't unique. */
+                               for (k = fql->tablec - 1; k >= 0; k--) {
+                                       if (fql->table[k].mv == mv)
+                                               return fql;
+                               }
+                       }
+               }
+       }
+       return(NULL);
+}
+
+
+static struct sysctl_ctx_list  machdep_est_ctx;
+
+static int
+est_init(void)
+{
+       char                    hwmodel[128];
+       int                     mib[] = { CTL_HW, HW_MODEL };
+       size_t                  modellen = sizeof(hwmodel);
+       struct sysctl_oid       *oid, *leaf;
+       uint64_t                msr;
+       int                     mv;
+       size_t                  len, freq_len;
+       int                     err;
+       size_t                  i;
+
+       if ((cpu_feature2 & CPUID2_EST) == 0) {
+               kprintf("Enhanced SpeedStep unsupported on this hardware.\n");
+               return(EOPNOTSUPP);
+       }
+
+       modellen = sizeof(hwmodel);
+       err = kernel_sysctl(mib, 2, hwmodel, &modellen, NULL, 0, NULL);
+       if (err) {
+               kprintf("kernel_sysctl hw.model failed\n");
+               return(err);
+       }
+
+       msr = rdmsr(MSR_PERF_STATUS);
+       mv = MSR2MV(msr);
+       kprintf("%s (%d mV) ", est_desc, mv);
+
+       est_fqlist = findcpu(hwmodel, mv);
+       if (est_fqlist == NULL) {
+               kprintf(" - unknown CPU or operating point "
+                      "(cpu_id:%#x, msr:%#llx).\n", cpu_id, msr);
+               return(EOPNOTSUPP);
+       }
+
+       /*
+        * OK, tell the user the available frequencies.
+        */
+       fsbmult = est_fqlist->fsbmult;
+       kprintf("%d MHz\n", MSR2MHZ(msr));
+
+       freq_len = est_fqlist->tablec * (sizeof("9999 ")-1) + 1;
+       if (freq_len >= sizeof(freqs_available)) {
+               kprintf("increase the size of freqs_available[]\n");
+               return(ENOMEM);
+       }
+       freqs_available[0] = '\0';
+       len = 0;
+       for (i = 0; i < est_fqlist->tablec; i++) {
+               len += ksnprintf(freqs_available + len, freq_len - len, "%d%s",
+                   est_fqlist->table[i].mhz,
+                   i < est_fqlist->tablec - 1 ? " " : "");
+       }
+       kprintf("%s frequencies available (MHz): %s\n", est_desc,
+              freqs_available);
+
+       /*
+        * Setup the sysctl sub-tree machdep.est.*
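+        *
+        * Illustrative usage from userland once this is set up (sysctl
+        * names as created below):
+        *
+        *      sysctl machdep.est.frequency.available
+        *      sysctl machdep.est.frequency.target=<MHz value>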
+        */
+       oid = SYSCTL_ADD_NODE(&machdep_est_ctx,
+           SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO, "est",
+           CTLFLAG_RD, NULL, "");
+       if (oid == NULL)
+               return(EOPNOTSUPP);
+       oid = SYSCTL_ADD_NODE(&machdep_est_ctx, SYSCTL_CHILDREN(oid),
+           OID_AUTO, "frequency", CTLFLAG_RD, NULL, "");
+       if (oid == NULL)
+               return(EOPNOTSUPP);
+       leaf = SYSCTL_ADD_PROC(&machdep_est_ctx, SYSCTL_CHILDREN(oid),
+           OID_AUTO, "target", CTLTYPE_INT | CTLFLAG_RW, NULL, 0,
+           est_sysctl_helper, "I",
+           "Target CPU frequency for Enhanced SpeedStep");
+       if (leaf == NULL)
+               return(EOPNOTSUPP);
+       leaf = SYSCTL_ADD_PROC(&machdep_est_ctx, SYSCTL_CHILDREN(oid),
+           OID_AUTO, "current", CTLTYPE_INT | CTLFLAG_RD, NULL, 0,
+           est_sysctl_helper, "I",
+           "Current CPU frequency for Enhanced SpeedStep");
+       if (leaf == NULL)
+               return(EOPNOTSUPP);
+       leaf = SYSCTL_ADD_STRING(&machdep_est_ctx, SYSCTL_CHILDREN(oid),
+           OID_AUTO, "available", CTLFLAG_RD, freqs_available,
+           sizeof(freqs_available),
+           "CPU frequencies supported by Enhanced SpeedStep");
+       if (leaf == NULL)
+               return(EOPNOTSUPP);
+
+       return(0);
+}
+
+static int
+est_modevh(struct module *m __unused, int what, void *arg __unused)
+{
+       int error;
+
+       switch (what) {
+       case MOD_LOAD:
+               error = sysctl_ctx_init(&machdep_est_ctx);
+               if (error != 0)
+                       break;
+               error = est_init();
+               break;
+       case MOD_UNLOAD:
+               error = sysctl_ctx_free(&machdep_est_ctx);
+               break;
+       default:
+               error = EINVAL;
+               break;
+       }
+       return(error);
+}
+
+static moduledata_t est_mod = {
+       "est",
+       est_modevh,
+       NULL,
+};
+
+DECLARE_MODULE(est, est_mod, SI_BOOT2_KLD, SI_ORDER_ANY);
index 7c5ce7e..974e5a3 100644 (file)
@@ -94,8 +94,8 @@ static struct {
        char    *cpu_name;
        int     cpu_class;
 } amd64_cpus[] = {
-       { "Clawhammer",         CPUCLASS_K8 },          /* CPU_CLAWHAMMER */
-       { "Sledgehammer",       CPUCLASS_K8 },          /* CPU_SLEDGEHAMMER */
+       { "Clawhammer",         CPUCLASS_386 },         /* CPU_CLAWHAMMER */
+       { "Sledgehammer",       CPUCLASS_386 },         /* CPU_SLEDGEHAMMER */
 };
 
 int cpu_cores;
@@ -157,7 +157,7 @@ printcpuinfo(void)
 
        kprintf("%s (", cpu_model);
        switch(cpu_class) {
-       case CPUCLASS_K8:
+       case CPUCLASS_386:
 #if JG
                hw_clockrate = (tsc_freq + 5000) / 1000000;
                kprintf("%jd.%02d-MHz ",
@@ -399,9 +399,19 @@ panicifcpuunsupported(void)
         * let them know if that machine type isn't configured.
         */
        switch (cpu_class) {
-       case CPUCLASS_X86:
-#ifndef HAMMER_CPU
-       case CPUCLASS_K8:
+       /*
+        * A 286 or 386 should not make it this far, anyway.
+        */
+       case CPUCLASS_286:
+       case CPUCLASS_386:
+#if !defined(I486_CPU)
+       case CPUCLASS_486:
+#endif
+#if !defined(I586_CPU)
+       case CPUCLASS_586:
+#endif
+#if !defined(I686_CPU)
+       case CPUCLASS_686:
 #endif
                panic("CPU class not configured");
        default:
@@ -464,7 +474,7 @@ identify_cpu(void)
        }
 
        /* XXX */
-       cpu = CPU_CLAWHAMMER;
+       cpu = CPU_386SX;
 }
 
 static void
diff --git a/sys/platform/pc64/apm/apm.c b/sys/platform/pc64/apm/apm.c
new file mode 100644 (file)
index 0000000..358c602
--- /dev/null
@@ -0,0 +1,1379 @@
+/*
+ * APM (Advanced Power Management) BIOS Device Driver
+ *
+ * Copyright (c) 1994 UKAI, Fumitoshi.
+ * Copyright (c) 1994-1995 by HOSOKAWA, Tatsumi <hosokawa@jp.FreeBSD.org>
+ * Copyright (c) 1996 Nate Williams <nate@FreeBSD.org>
+ * Copyright (c) 1997 Poul-Henning Kamp <phk@FreeBSD.org>
+ *
+ * This software may be used, modified, copied, and distributed, in
+ * both source and binary form provided that the above copyright and
+ * these terms are retained. Under no circumstances is the author
+ * responsible for the proper functioning of this software, nor does
+ * the author assume any responsibility for damages incurred with its
+ * use.
+ *
+ * Sep, 1994   Implemented on FreeBSD 1.1.5.1R (Toshiba AVS001WD)
+ *
+ * $FreeBSD: src/sys/i386/apm/apm.c,v 1.114.2.5 2002/11/02 04:41:50 iwasaki Exp $
+ * $DragonFly: src/sys/platform/pc32/apm/apm.c,v 1.21 2006/12/23 00:27:03 swildner Exp $
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/eventhandler.h>
+#include <sys/conf.h>
+#include <sys/device.h>
+#include <sys/kernel.h>
+#include <sys/time.h>
+#include <sys/reboot.h>
+#include <sys/bus.h>
+#include <sys/selinfo.h>
+#include <sys/poll.h>
+#include <sys/fcntl.h>
+#include <sys/uio.h>
+#include <sys/signalvar.h>
+#include <sys/sysctl.h>
+#include <machine/apm_bios.h>
+#include <machine/segments.h>
+#include <machine/clock.h>
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/pmap.h>
+#include <sys/syslog.h>
+#include <sys/thread2.h>
+
+#include <machine/pc/bios.h>
+#include <machine/vm86.h>
+
+#include <machine_base/apm/apm.h>
+
+/* Used by the apm_saver screen saver module */
+int apm_display (int newstate);
+struct apm_softc apm_softc;
+
+static void apm_resume (void);
+static int apm_bioscall(void);
+static int apm_check_function_supported (u_int version, u_int func);
+
+static u_long  apm_version;
+
+int    apm_evindex;
+
+#define        SCFLAG_ONORMAL  0x0000001
+#define        SCFLAG_OCTL     0x0000002
+#define        SCFLAG_OPEN     (SCFLAG_ONORMAL|SCFLAG_OCTL)
+
+#define APMDEV(dev)    (minor(dev)&0x0f)
+#define APMDEV_NORMAL  0
+#define APMDEV_CTL     8
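+/* minor 0 is /dev/apm, minor 8 is /dev/apmctl; see make_dev() in apm_attach() */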
+
+static struct apmhook  *hook[NAPM_HOOK];               /* XXX */
+
+#define is_enabled(foo) ((foo) ? "enabled" : "disabled")
+
+/* Map version number to integer (keeps ordering of version numbers) */
+#define INTVERSION(major, minor)       ((major)*100 + (minor))
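+/* e.g. INTVERSION(1, 2) == 102, which compares greater than INTVERSION(1, 1) == 101 */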
+
+static struct callout apm_timeout_ch;
+
+static timeout_t apm_timeout;
+static d_open_t apmopen;
+static d_close_t apmclose;
+static d_write_t apmwrite;
+static d_ioctl_t apmioctl;
+static d_poll_t apmpoll;
+
+#define CDEV_MAJOR 39
+static struct dev_ops apm_ops = {
+       { "apm", CDEV_MAJOR, 0 },
+       .d_open =       apmopen,
+       .d_close =      apmclose,
+       .d_write =      apmwrite,
+       .d_ioctl =      apmioctl,
+       .d_poll =       apmpoll,
+};
+
+static int apm_suspend_delay = 1;
+static int apm_standby_delay = 1;
+static int apm_debug = 0;
+
+#define APM_DPRINT(args...) do {                                       \
+       if (apm_debug) {                                                \
+               kprintf(args);                                          \
+       }                                                               \
+} while (0)
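+/* APM_DPRINT() output is gated by the debug.apm_debug tunable/sysctl below */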
+
+SYSCTL_INT(_machdep, OID_AUTO, apm_suspend_delay, CTLFLAG_RW, &apm_suspend_delay, 1, "");
+SYSCTL_INT(_machdep, OID_AUTO, apm_standby_delay, CTLFLAG_RW, &apm_standby_delay, 1, "");
+SYSCTL_INT(_debug, OID_AUTO, apm_debug, CTLFLAG_RW, &apm_debug, 0, "");
+
+/*
+ * return  0 if the function was successful,
+ * return  1 if the function was unsuccessful,
+ * return -1 if the function is unsupported.
+ */
+static int
+apm_bioscall(void)
+{
+       struct apm_softc *sc = &apm_softc;
+       int error = 0;
+       u_int apm_func = sc->bios.r.eax & 0xff;
+
+       if (!apm_check_function_supported(sc->intversion, apm_func)) {
+               APM_DPRINT("apm_bioscall: function 0x%x is not supported in v%d.%d\n",
+                   apm_func, sc->majorversion, sc->minorversion);
+               return (-1);
+       }
+
+       sc->bios_busy = 1;
+       if (sc->connectmode == APM_PROT32CONNECT) {
+               set_bios_selectors(&sc->bios.seg,
+                                  BIOSCODE_FLAG | BIOSDATA_FLAG);
+               error = bios32(&sc->bios.r,
+                              sc->bios.entry, GSEL(GBIOSCODE32_SEL, SEL_KPL));
+       } else {
+               error = bios16(&sc->bios, NULL);
+       }
+       sc->bios_busy = 0;
+       return (error);
+}
+
+/* check whether APM function is supported (1)  or not (0). */
+static int
+apm_check_function_supported(u_int version, u_int func)
+{
+       /* except driver version */
+       if (func == APM_DRVVERSION) {
+               return (1);
+       }
+
+       switch (version) {
+       case INTVERSION(1, 0):
+               if (func > APM_GETPMEVENT) {
+                       return (0); /* not supported */
+               }
+               break;
+       case INTVERSION(1, 1):
+               if (func > APM_ENGAGEDISENGAGEPM &&
+                   func < APM_OEMFUNC) {
+                       return (0); /* not supported */
+               }
+               break;
+       case INTVERSION(1, 2):
+               break;
+       }
+
+       return (1); /* supported */
+}
+
+/* enable/disable power management */
+static int
+apm_enable_disable_pm(int enable)
+{
+       struct apm_softc *sc = &apm_softc;
+
+       sc->bios.r.eax = (APM_BIOS << 8) | APM_ENABLEDISABLEPM;
+
+       if (sc->intversion >= INTVERSION(1, 1))
+               sc->bios.r.ebx  = PMDV_ALLDEV;
+       else
+               sc->bios.r.ebx  = 0xffff;       /* APM version 1.0 only */
+       sc->bios.r.ecx  = enable;
+       sc->bios.r.edx = 0;
+       return (apm_bioscall());
+}
+
+/* register driver version (APM 1.1 or later) */
+static int
+apm_driver_version(int version)
+{
+       struct apm_softc *sc = &apm_softc;
+
+       sc->bios.r.eax = (APM_BIOS << 8) | APM_DRVVERSION;
+       sc->bios.r.ebx  = 0x0;
+       sc->bios.r.ecx  = version;
+       sc->bios.r.edx = 0;
+
+       if (apm_bioscall() == 0 && sc->bios.r.eax == version)
+               return (0);
+
+       /* Some old BIOSes don't return the connection version in %ax. */
+       if (sc->bios.r.eax == ((APM_BIOS << 8) | APM_DRVVERSION))
+               return (0);
+
+       return (1);
+}
+
+/* engage/disengage power management (APM 1.1 or later) */
+static int
+apm_engage_disengage_pm(int engage)
+{
+       struct apm_softc *sc = &apm_softc;
+
+       sc->bios.r.eax = (APM_BIOS << 8) | APM_ENGAGEDISENGAGEPM;
+       sc->bios.r.ebx = PMDV_ALLDEV;
+       sc->bios.r.ecx = engage;
+       sc->bios.r.edx = 0;
+       return (apm_bioscall());
+}
+
+/* get PM event */
+static u_int
+apm_getevent(void)
+{
+       struct apm_softc *sc = &apm_softc;
+
+       sc->bios.r.eax = (APM_BIOS << 8) | APM_GETPMEVENT;
+
+       sc->bios.r.ebx = 0;
+       sc->bios.r.ecx = 0;
+       sc->bios.r.edx = 0;
+       if (apm_bioscall())
+               return (PMEV_NOEVENT);
+       return (sc->bios.r.ebx & 0xffff);
+}
+
+/* suspend entire system */
+static int
+apm_suspend_system(int state)
+{
+       struct apm_softc *sc = &apm_softc;
+
+       sc->bios.r.eax = (APM_BIOS << 8) | APM_SETPWSTATE;
+       sc->bios.r.ebx = PMDV_ALLDEV;
+       sc->bios.r.ecx = state;
+       sc->bios.r.edx = 0;
+
+       if (apm_bioscall()) {
+               kprintf("Entire system suspend failure: errcode = %d\n",
+                       0xff & (sc->bios.r.eax >> 8));
+               return 1;
+       }
+       return 0;
+}
+
+/* Display control */
+/*
+ * Experimental implementation: My laptop machine can't handle this function
+ * If your laptop can control the display via APM, please inform me.
+ *                            HOSOKAWA, Tatsumi <hosokawa@jp.FreeBSD.org>
+ */
+int
+apm_display(int newstate)
+{
+       struct apm_softc *sc = &apm_softc;
+
+       sc->bios.r.eax = (APM_BIOS << 8) | APM_SETPWSTATE;
+       sc->bios.r.ebx = PMDV_DISP0;
+       sc->bios.r.ecx = newstate ? PMST_APMENABLED:PMST_SUSPEND;
+       sc->bios.r.edx = 0;
+       if (apm_bioscall() == 0) {
+               return 0;
+       }
+
+       /* If failed, then try to blank all display devices instead. */
+       sc->bios.r.eax = (APM_BIOS << 8) | APM_SETPWSTATE;
+       sc->bios.r.ebx = PMDV_DISPALL;  /* all display devices */
+       sc->bios.r.ecx = newstate ? PMST_APMENABLED:PMST_SUSPEND;
+       sc->bios.r.edx = 0;
+       if (apm_bioscall() == 0) {
+               return 0;
+       }
+       kprintf("Display off failure: errcode = %d\n",
+              0xff & (sc->bios.r.eax >> 8));
+       return 1;
+}
+
+/*
+ * Turn off the entire system.
+ */
+static void
+apm_power_off(void *junk, int howto)
+{
+       struct apm_softc *sc = &apm_softc;
+
+       /* Not powering off, or APM not active */
+       if (!(howto & RB_POWEROFF) || !apm_softc.active)
+               return;
+       sc->bios.r.eax = (APM_BIOS << 8) | APM_SETPWSTATE;
+       sc->bios.r.ebx = PMDV_ALLDEV;
+       sc->bios.r.ecx = PMST_OFF;
+       sc->bios.r.edx = 0;
+       apm_bioscall();
+}
+
+/* APM Battery low handler */
+static void
+apm_battery_low(void)
+{
+       kprintf("\007\007 * * * BATTERY IS LOW * * * \007\007");
+}
+
+/* APM hook manager */
+static struct apmhook *
+apm_add_hook(struct apmhook **list, struct apmhook *ah)
+{
+       struct apmhook *p, *prev;
+
+       APM_DPRINT("Add hook \"%s\"\n", ah->ah_name);
+
+       crit_enter();
+       if (ah == NULL)
+               panic("illegal apm_hook!");
+       prev = NULL;
+       for (p = *list; p != NULL; prev = p, p = p->ah_next)
+               if (p->ah_order > ah->ah_order)
+                       break;
+
+       if (prev == NULL) {
+               ah->ah_next = *list;
+               *list = ah;
+       } else {
+               ah->ah_next = prev->ah_next;
+               prev->ah_next = ah;
+       }
+       crit_exit();
+       return ah;
+}
+
+static void
+apm_del_hook(struct apmhook **list, struct apmhook *ah)
+{
+       struct apmhook *p, *prev;
+
+       crit_enter();
+       prev = NULL;
+       for (p = *list; p != NULL; prev = p, p = p->ah_next)
+               if (p == ah)
+                       goto deleteit;
+       panic("Tried to delete unregistered apm_hook.");
+       goto nosuchnode;
+deleteit:
+       if (prev != NULL)
+               prev->ah_next = p->ah_next;
+       else
+               *list = p->ah_next;
+nosuchnode:
+       crit_exit();
+}
+
+
+/* APM driver calls some functions automatically */
+static void
+apm_execute_hook(struct apmhook *list)
+{
+       struct apmhook *p;
+
+       for (p = list; p != NULL; p = p->ah_next) {
+               APM_DPRINT("Execute APM hook \"%s.\"\n", p->ah_name);
+               if ((*(p->ah_fun))(p->ah_arg))
+                       kprintf("Warning: APM hook \"%s\" failed", p->ah_name);
+       }
+}
+
+
+/* establish an apm hook */
+struct apmhook *
+apm_hook_establish(int apmh, struct apmhook *ah)
+{
+       if (apmh < 0 || apmh >= NAPM_HOOK)
+               return NULL;
+
+       return apm_add_hook(&hook[apmh], ah);
+}
+
+/* disestablish an apm hook */
+void
+apm_hook_disestablish(int apmh, struct apmhook *ah)
+{
+       if (apmh < 0 || apmh >= NAPM_HOOK)
+               return;
+
+       apm_del_hook(&hook[apmh], ah);
+}
+
+
+static struct timeval suspend_time;
+static struct timeval diff_time;
+
+static int
+apm_default_resume(void *arg)
+{
+       u_int second, minute, hour;
+       struct timeval resume_time, tmp_time;
+
+       /* modified for adjkerntz */
+       crit_enter();
+       timer_restore();                /* restore the all timers */
+       inittodr(0);                    /* adjust time to RTC */
+       microtime(&resume_time);
+       getmicrotime(&tmp_time);
+       timevaladd(&tmp_time, &diff_time);
+
+#ifdef FIXME
+       /* XXX THIS DOESN'T WORK!!! */
+       time = tmp_time;
+#endif
+
+#ifdef APM_FIXUP_CALLTODO
+       /* Calculate the delta time suspended */
+       timevalsub(&resume_time, &suspend_time);
+       /* Fixup the calltodo list with the delta time. */
+       adjust_timeout_calltodo(&resume_time);
+#endif /* APM_FIXUP_CALLTODO */
+       crit_exit();
+#ifndef APM_FIXUP_CALLTODO
+       second = resume_time.tv_sec - suspend_time.tv_sec;
+#else /* APM_FIXUP_CALLTODO */
+       /*
+        * We've already calculated resume_time to be the delta between
+        * the suspend and the resume.
+        */
+       second = resume_time.tv_sec;
+#endif /* APM_FIXUP_CALLTODO */
+       hour = second / 3600;
+       second %= 3600;
+       minute = second / 60;
+       second %= 60;
+       log(LOG_NOTICE, "resumed from suspended mode (slept %02d:%02d:%02d)\n",
+               hour, minute, second);
+       return 0;
+}
+
+static int
+apm_default_suspend(void *arg)
+{
+       crit_enter();
+       microtime(&diff_time);
+       inittodr(0);
+       microtime(&suspend_time);
+       timevalsub(&diff_time, &suspend_time);
+       crit_exit();
+       return 0;
+}
+
+static int apm_record_event (struct apm_softc *, u_int);
+static void apm_processevent(void);
+
+static u_int apm_op_inprog = 0;
+
+static void
+apm_do_suspend(void)
+{
+       struct apm_softc *sc = &apm_softc;
+       int error;
+
+       if (!sc)
+               return;
+
+       apm_op_inprog = 0;
+       sc->suspends = sc->suspend_countdown = 0;
+
+       if (sc->initialized) {
+               error = DEVICE_SUSPEND(root_bus);
+               if (error) {
+                       DEVICE_RESUME(root_bus);
+               } else {
+                       apm_execute_hook(hook[APM_HOOK_SUSPEND]);
+                       if (apm_suspend_system(PMST_SUSPEND) == 0) {
+                               apm_processevent();
+                       } else {
+                               /* Failure, 'resume' the system again */
+                               apm_execute_hook(hook[APM_HOOK_RESUME]);
+                               DEVICE_RESUME(root_bus);
+                       }
+               }
+       }
+}
+
+static void
+apm_do_standby(void)
+{
+       struct apm_softc *sc = &apm_softc;
+
+       if (!sc)
+               return;
+
+       apm_op_inprog = 0;
+       sc->standbys = sc->standby_countdown = 0;
+
+       if (sc->initialized) {
+               /*
+                * For standby we don't need to execute
+                * all of the suspend hooks.
+                */
+               apm_default_suspend(&apm_softc);
+               if (apm_suspend_system(PMST_STANDBY) == 0)
+                       apm_processevent();
+       }
+}
+
+static void
+apm_lastreq_notify(void)
+{
+       struct apm_softc *sc = &apm_softc;
+
+       sc->bios.r.eax = (APM_BIOS << 8) | APM_SETPWSTATE;
+       sc->bios.r.ebx = PMDV_ALLDEV;
+       sc->bios.r.ecx = PMST_LASTREQNOTIFY;
+       sc->bios.r.edx = 0;
+       apm_bioscall();
+}
+
+static int
+apm_lastreq_rejected(void)
+{
+       struct apm_softc *sc = &apm_softc;
+
+       if (apm_op_inprog == 0) {
+               return 1;       /* no operation in progress */
+       }
+
+       sc->bios.r.eax = (APM_BIOS << 8) | APM_SETPWSTATE;
+       sc->bios.r.ebx = PMDV_ALLDEV;
+       sc->bios.r.ecx = PMST_LASTREQREJECT;
+       sc->bios.r.edx = 0;
+
+       if (apm_bioscall()) {
+               APM_DPRINT("apm_lastreq_rejected: failed\n");
+               return 1;
+       }
+       apm_op_inprog = 0;
+       return 0;
+}
+
+/*
+ * Public interface to suspend/resume:
+ *
+ * Execute the suspend and resume hooks before and after sleep, respectively.
+ */
+
+void
+apm_suspend(int state)
+{
+       struct apm_softc *sc = &apm_softc;
+
+       if (!sc->initialized)
+               return;
+
+       switch (state) {
+       case PMST_SUSPEND:
+               if (sc->suspends)
+                       return;
+               sc->suspends++;
+               sc->suspend_countdown = apm_suspend_delay;
+               break;
+       case PMST_STANDBY:
+               if (sc->standbys)
+                       return;
+               sc->standbys++;
+               sc->standby_countdown = apm_standby_delay;
+               break;
+       default:
+               kprintf("apm_suspend: Unknown Suspend state 0x%x\n", state);
+               return;
+       }
+
+       apm_op_inprog++;
+       apm_lastreq_notify();
+}
+
+void
+apm_resume(void)
+{
+       struct apm_softc *sc = &apm_softc;
+
+       if (!sc)
+               return;
+
+       if (sc->initialized) {
+               apm_execute_hook(hook[APM_HOOK_RESUME]);
+               DEVICE_RESUME(root_bus);
+       }
+}
+
+
+/* get power status per battery */
+static int
+apm_get_pwstatus(apm_pwstatus_t app)
+{
+       struct apm_softc *sc = &apm_softc;
+
+       if (app->ap_device != PMDV_ALLDEV &&
+           (app->ap_device < PMDV_BATT0 || app->ap_device > PMDV_BATT_ALL))
+               return 1;
+
+       sc->bios.r.eax = (APM_BIOS << 8) | APM_GETPWSTATUS;
+       sc->bios.r.ebx = app->ap_device;
+       sc->bios.r.ecx = 0;
+       sc->bios.r.edx = 0xffff;        /* default to unknown battery time */
+
+       if (apm_bioscall())
+               return 1;
+
+       app->ap_acline    = (sc->bios.r.ebx >> 8) & 0xff;
+       app->ap_batt_stat = sc->bios.r.ebx & 0xff;
+       app->ap_batt_flag = (sc->bios.r.ecx >> 8) & 0xff;
+       app->ap_batt_life = sc->bios.r.ecx & 0xff;
+       sc->bios.r.edx &= 0xffff;
+       if (sc->bios.r.edx == 0xffff)   /* Time is unknown */
+               app->ap_batt_time = -1;
+       else if (sc->bios.r.edx & 0x8000)       /* Time is in minutes */
+               app->ap_batt_time = (sc->bios.r.edx & 0x7fff) * 60;
+       else                            /* Time is in seconds */
+               app->ap_batt_time = sc->bios.r.edx;
+
+       return 0;
+}
+
+
+/* get APM information */
+static int
+apm_get_info(apm_info_t aip)
+{
+       struct apm_softc *sc = &apm_softc;
+       struct apm_pwstatus aps;
+
+       bzero(&aps, sizeof(aps));
+       aps.ap_device = PMDV_ALLDEV;
+       if (apm_get_pwstatus(&aps))
+               return 1;
+
+       aip->ai_infoversion = 1;
+       aip->ai_acline      = aps.ap_acline;
+       aip->ai_batt_stat   = aps.ap_batt_stat;
+       aip->ai_batt_life   = aps.ap_batt_life;
+       aip->ai_batt_time   = aps.ap_batt_time;
+       aip->ai_major       = (u_int)sc->majorversion;
+       aip->ai_minor       = (u_int)sc->minorversion;
+       aip->ai_status      = (u_int)sc->active;
+
+       sc->bios.r.eax = (APM_BIOS << 8) | APM_GETCAPABILITIES;
+       sc->bios.r.ebx = 0;
+       sc->bios.r.ecx = 0;
+       sc->bios.r.edx = 0;
+       if (apm_bioscall()) {
+               aip->ai_batteries = -1; /* Unknown */
+               aip->ai_capabilities = 0xff00; /* Unknown, with no bits set */
+       } else {
+               aip->ai_batteries = sc->bios.r.ebx & 0xff;
+               aip->ai_capabilities = sc->bios.r.ecx & 0xf;
+       }
+
+       bzero(aip->ai_spare, sizeof aip->ai_spare);
+
+       return 0;
+}
+
+
+/* inform APM BIOS that CPU is idle */
+void
+apm_cpu_idle(void)
+{
+       struct apm_softc *sc = &apm_softc;
+
+       if (sc->active) {
+
+               sc->bios.r.eax = (APM_BIOS <<8) | APM_CPUIDLE;
+               sc->bios.r.edx = sc->bios.r.ecx = sc->bios.r.ebx = 0;
+               apm_bioscall();
+       }
+       /*
+        * Some APM implementations halt the CPU in the BIOS whenever the
+        * "CPU idle" function is invoked, but swtch() in FreeBSD also halts
+        * the CPU, so the CPU ends up halted twice in the scheduling loop.
+        * That makes the interrupt latency terribly long and can cause
+        * serious problems in interrupt processing.  We prevent this by
+        * removing the "hlt" operation from swtch() and managing it in the
+        * APM driver instead.
+        */
+       if (!sc->active || sc->always_halt_cpu)
+               __asm("hlt");   /* wait for interrupt */
+}
+
+/* inform APM BIOS that CPU is busy */
+void
+apm_cpu_busy(void)
+{
+       struct apm_softc *sc = &apm_softc;
+
+       /*
+        * The APM specification says this is only necessary if your BIOS
+        * slows down the processor in the idle task, otherwise it's not
+        * necessary.
+        */
+       if (sc->slow_idle_cpu && sc->active) {
+
+               sc->bios.r.eax = (APM_BIOS <<8) | APM_CPUBUSY;
+               sc->bios.r.edx = sc->bios.r.ecx = sc->bios.r.ebx = 0;
+               apm_bioscall();
+       }
+}
+
+
+/*
+ * APM timeout routine:
+ *
+ * This routine is automatically called by timer once per second.
+ */
+
+static void
+apm_timeout(void *dummy)
+{
+       struct apm_softc *sc = &apm_softc;
+
+       if (apm_op_inprog)
+               apm_lastreq_notify();
+
+       if (sc->standbys && sc->standby_countdown-- <= 0)
+               apm_do_standby();
+
+       if (sc->suspends && sc->suspend_countdown-- <= 0)
+               apm_do_suspend();
+
+       if (!sc->bios_busy)
+               apm_processevent();
+
+       if (sc->active == 1) {
+               /* Run slightly more often than once per second */
+               callout_reset(&apm_timeout_ch, hz - 1, apm_timeout, NULL);
+       }
+}
+
+/* enable APM BIOS */
+static void
+apm_event_enable(void)
+{
+       struct apm_softc *sc = &apm_softc;
+
+       APM_DPRINT("called apm_event_enable()\n");
+       if (sc->initialized) {
+               sc->active = 1;
+               callout_init(&apm_timeout_ch);
+               apm_timeout(sc);
+       }
+}
+
+/* disable APM BIOS */
+static void
+apm_event_disable(void)
+{
+       struct apm_softc *sc = &apm_softc;
+
+       APM_DPRINT("called apm_event_disable()\n");
+       if (sc->initialized) {
+               callout_stop(&apm_timeout_ch);
+               sc->active = 0;
+       }
+}
+
+/* halt CPU in scheduling loop */
+static void
+apm_halt_cpu(void)
+{
+       struct apm_softc *sc = &apm_softc;
+
+       if (sc->initialized)
+               sc->always_halt_cpu = 1;
+}
+
+/* don't halt CPU in scheduling loop */
+static void
+apm_not_halt_cpu(void)
+{
+       struct apm_softc *sc = &apm_softc;
+
+       if (sc->initialized)
+               sc->always_halt_cpu = 0;
+}
+
+/* device driver definitions */
+
+/*
+ * probe for APM BIOS
+ */
+static int
+apm_probe(device_t dev)
+{
+#define APM_KERNBASE   KERNBASE
+       struct vm86frame        vmf;
+       struct apm_softc        *sc = &apm_softc;
+       int                     disabled, flags;
+
+       if (resource_int_value("apm", 0, "disabled", &disabled) == 0
+           && disabled != 0)
+               return ENXIO;
+
+       device_set_desc(dev, "APM BIOS");
+
+       if (device_get_unit(dev) > 0) {
+               kprintf("apm: Only one APM driver supported.\n");
+               return ENXIO;
+       }
+
+       if (resource_int_value("apm", 0, "flags", &flags) != 0)
+               flags = 0;
+
+       bzero(&vmf, sizeof(struct vm86frame));          /* safety */
+       bzero(&apm_softc, sizeof(apm_softc));
+       vmf.vmf_ah = APM_BIOS;
+       vmf.vmf_al = APM_INSTCHECK;
+       vmf.vmf_bx = 0;
+       if (vm86_intcall(APM_INT, &vmf))
+               return ENXIO;                   /* APM not found */
+       if (vmf.vmf_bx != 0x504d) {
+               kprintf("apm: incorrect signature (0x%x)\n", vmf.vmf_bx);
+               return ENXIO;
+       }
+       if ((vmf.vmf_cx & (APM_32BIT_SUPPORT | APM_16BIT_SUPPORT)) == 0) {
+               kprintf("apm: protected mode connections are not supported\n");
+               return ENXIO;
+       }
+
+       apm_version = vmf.vmf_ax;
+       sc->slow_idle_cpu = ((vmf.vmf_cx & APM_CPUIDLE_SLOW) != 0);
+       sc->disabled = ((vmf.vmf_cx & APM_DISABLED) != 0);
+       sc->disengaged = ((vmf.vmf_cx & APM_DISENGAGED) != 0);
+
+       vmf.vmf_ah = APM_BIOS;
+       vmf.vmf_al = APM_DISCONNECT;
+       vmf.vmf_bx = 0;
+        vm86_intcall(APM_INT, &vmf);           /* disconnect, just in case */
+
+       if ((vmf.vmf_cx & APM_32BIT_SUPPORT) != 0) {
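+               /*
+                * On a successful 32-bit connect the BIOS hands back
+                * real-mode segment values in %ax/%cx/%dx; shifting them
+                * left by 4 gives the physical bases, which are relocated
+                * into the kernel map via APM_KERNBASE.
+                */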
+               vmf.vmf_ah = APM_BIOS;
+               vmf.vmf_al = APM_PROT32CONNECT;
+               vmf.vmf_bx = 0;
+               if (vm86_intcall(APM_INT, &vmf)) {
+                       kprintf("apm: 32-bit connection error.\n");
+                       return (ENXIO);
+               }
+               sc->bios.seg.code32.base = (vmf.vmf_ax << 4) + APM_KERNBASE;
+               sc->bios.seg.code32.limit = 0xffff;
+               sc->bios.seg.code16.base = (vmf.vmf_cx << 4) + APM_KERNBASE;
+               sc->bios.seg.code16.limit = 0xffff;
+               sc->bios.seg.data.base = (vmf.vmf_dx << 4) + APM_KERNBASE;
+               sc->bios.seg.data.limit = 0xffff;
+               sc->bios.entry = vmf.vmf_ebx;
+               sc->connectmode = APM_PROT32CONNECT;
+       } else {
+               /* use 16-bit connection */
+               vmf.vmf_ah = APM_BIOS;
+               vmf.vmf_al = APM_PROT16CONNECT;
+               vmf.vmf_bx = 0;
+               if (vm86_intcall(APM_INT, &vmf)) {
+                       kprintf("apm: 16-bit connection error.\n");
+                       return (ENXIO);
+               }
+               sc->bios.seg.code16.base = (vmf.vmf_ax << 4) + APM_KERNBASE;
+               sc->bios.seg.code16.limit = 0xffff;
+               sc->bios.seg.data.base = (vmf.vmf_cx << 4) + APM_KERNBASE;
+               sc->bios.seg.data.limit = 0xffff;
+               sc->bios.entry = vmf.vmf_bx;
+               sc->connectmode = APM_PROT16CONNECT;
+       }
+       return(0);
+}
+
+
+/*
+ * return 0 if the user will notice and handle the event,
+ * return 1 if the kernel driver should do so.
+ */
+static int
+apm_record_event(struct apm_softc *sc, u_int event_type)
+{
+       struct apm_event_info *evp;
+
+       if ((sc->sc_flags & SCFLAG_OPEN) == 0)
+               return 1;               /* no user waiting */
+       if (sc->event_count == APM_NEVENTS)
+               return 1;                       /* overflow */
+       if (sc->event_filter[event_type] == 0)
+               return 1;               /* not registered */
+       evp = &sc->event_list[sc->event_ptr];
+       sc->event_count++;
+       sc->event_ptr++;
+       sc->event_ptr %= APM_NEVENTS;
+       evp->type = event_type;
+       evp->index = ++apm_evindex;
+       selwakeup(&sc->sc_rsel);
+       return (sc->sc_flags & SCFLAG_OCTL) ? 0 : 1; /* user may handle */
+}
+
+/* Process APM event */
+static void
+apm_processevent(void)
+{
+       int apm_event;
+       struct apm_softc *sc = &apm_softc;
+
+#define OPMEV_DEBUGMESSAGE(symbol) case symbol:                                \
+       APM_DPRINT("Received APM Event: " #symbol "\n");
+
+       do {
+               apm_event = apm_getevent();
+               switch (apm_event) {
+                   OPMEV_DEBUGMESSAGE(PMEV_STANDBYREQ);
+                       if (apm_op_inprog == 0) {
+                           apm_op_inprog++;
+                           if (apm_record_event(sc, apm_event)) {
+                               apm_suspend(PMST_STANDBY);
+                           }
+                       }
+                       break;
+                   OPMEV_DEBUGMESSAGE(PMEV_USERSTANDBYREQ);
+                       if (apm_op_inprog == 0) {
+                           apm_op_inprog++;
+                           if (apm_record_event(sc, apm_event)) {
+                               apm_suspend(PMST_STANDBY);
+                           }
+                       }
+                       break;
+                   OPMEV_DEBUGMESSAGE(PMEV_SUSPENDREQ);
+                       apm_lastreq_notify();
+                       if (apm_op_inprog == 0) {
+                           apm_op_inprog++;
+                           if (apm_record_event(sc, apm_event)) {
+                               apm_do_suspend();
+                           }
+                       }
+                       return; /* XXX skip the rest */
+                   OPMEV_DEBUGMESSAGE(PMEV_USERSUSPENDREQ);
+                       apm_lastreq_notify();
+                       if (apm_op_inprog == 0) {
+                           apm_op_inprog++;
+                           if (apm_record_event(sc, apm_event)) {
+                               apm_do_suspend();
+                           }
+                       }
+                       return; /* XXX skip the rest */
+                   OPMEV_DEBUGMESSAGE(PMEV_CRITSUSPEND);
+                       apm_do_suspend();
+                       break;
+                   OPMEV_DEBUGMESSAGE(PMEV_NORMRESUME);
+                       apm_record_event(sc, apm_event);
+                       apm_resume();
+                       break;
+                   OPMEV_DEBUGMESSAGE(PMEV_CRITRESUME);
+                       apm_record_event(sc, apm_event);
+                       apm_resume();
+                       break;
+                   OPMEV_DEBUGMESSAGE(PMEV_STANDBYRESUME);
+                       apm_record_event(sc, apm_event);
+                       apm_resume();
+                       break;
+                   OPMEV_DEBUGMESSAGE(PMEV_BATTERYLOW);
+                       if (apm_record_event(sc, apm_event)) {
+                           apm_battery_low();
+                           apm_suspend(PMST_SUSPEND);
+                       }
+                       break;
+                   OPMEV_DEBUGMESSAGE(PMEV_POWERSTATECHANGE);
+                       apm_record_event(sc, apm_event);
+                       break;
+                   OPMEV_DEBUGMESSAGE(PMEV_UPDATETIME);
+                       apm_record_event(sc, apm_event);
+                       inittodr(0);    /* adjust time to RTC */
+                       break;
+                   OPMEV_DEBUGMESSAGE(PMEV_CAPABILITIESCHANGE);
+                       apm_record_event(sc, apm_event);
+                       break;
+                   case PMEV_NOEVENT:
+                       break;
+                   default:
+                       kprintf("Unknown Original APM Event 0x%x\n", apm_event);
+                           break;
+               }
+       } while (apm_event != PMEV_NOEVENT);
+}
+
+/*
+ * Attach APM:
+ *
+ * Initialize APM driver
+ */
+
+static int
+apm_attach(device_t dev)
+{
+       struct apm_softc        *sc = &apm_softc;
+       int                     flags;
+       int                     drv_version;
+
+       if (resource_int_value("apm", 0, "flags", &flags) != 0)
+               flags = 0;
+
+       sc->initialized = 0;
+
+       /* Must be externally enabled */
+       sc->active = 0;
+
+       /* Always call HLT in idle loop */
+       sc->always_halt_cpu = 1;
+
+       kgetenv_int("debug.apm_debug", &apm_debug);
+
+       /* print bootstrap messages */
+       APM_DPRINT("apm: APM BIOS version %04lx\n",  apm_version);
+       APM_DPRINT("apm: Code16 0x%08x, Data 0x%08x\n",
+           sc->bios.seg.code16.base, sc->bios.seg.data.base);
+       APM_DPRINT("apm: Code entry 0x%08x, Idling CPU %s, Management %s\n",
+           sc->bios.entry, is_enabled(sc->slow_idle_cpu),
+           is_enabled(!sc->disabled));
+       APM_DPRINT("apm: CS_limit=0x%x, DS_limit=0x%x\n",
+           sc->bios.seg.code16.limit, sc->bios.seg.data.limit);
+
+       /*
+        * In one test, apm bios version was 1.02; an attempt to register
+        * a 1.04 driver resulted in a 1.00 connection!  Registering a
+        * 1.02 driver resulted in a 1.02 connection.
+        */
+       drv_version = apm_version > 0x102 ? 0x102 : apm_version;
+       for (; drv_version > 0x100; drv_version--)
+               if (apm_driver_version(drv_version) == 0)
+                       break;
+       sc->minorversion = ((drv_version & 0x00f0) >>  4) * 10 +
+               ((drv_version & 0x000f) >> 0);
+       sc->majorversion = ((drv_version & 0xf000) >> 12) * 10 +
+               ((apm_version & 0x0f00) >> 8);
+
+       sc->intversion = INTVERSION(sc->majorversion, sc->minorversion);
+
+       if (sc->intversion >= INTVERSION(1, 1))
+               APM_DPRINT("apm: Engaged control %s\n", is_enabled(!sc->disengaged));
+       device_printf(dev, "found APM BIOS v%ld.%ld, connected at v%d.%d\n",
+              ((apm_version & 0xf000) >> 12) * 10 + ((apm_version & 0x0f00) >> 8),
+              ((apm_version & 0x00f0) >> 4) * 10 + ((apm_version & 0x000f) >> 0),
+              sc->majorversion, sc->minorversion);
+
+
+       APM_DPRINT("apm: Slow Idling CPU %s\n", is_enabled(sc->slow_idle_cpu));
+       /* enable power management */
+       if (sc->disabled) {
+               if (apm_enable_disable_pm(1)) {
+                       APM_DPRINT("apm: *Warning* enable function failed! [%x]\n",
+                           (sc->bios.r.eax >> 8) & 0xff);
+               }
+       }
+
+       /* engage power management (APM 1.1 or later) */
+       if (sc->intversion >= INTVERSION(1, 1) && sc->disengaged) {
+               if (apm_engage_disengage_pm(1)) {
+                       APM_DPRINT("apm: *Warning* engage function failed err=[%x]",
+                           (sc->bios.r.eax >> 8) & 0xff);
+                       APM_DPRINT(" (Docked or using external power?).\n");
+               }
+       }
+
+        /* default suspend hook */
+        sc->sc_suspend.ah_fun = apm_default_suspend;
+        sc->sc_suspend.ah_arg = sc;
+        sc->sc_suspend.ah_name = "default suspend";
+        sc->sc_suspend.ah_order = APM_MAX_ORDER;
+
+        /* default resume hook */
+        sc->sc_resume.ah_fun = apm_default_resume;
+        sc->sc_resume.ah_arg = sc;
+        sc->sc_resume.ah_name = "default resume";
+        sc->sc_resume.ah_order = APM_MIN_ORDER;
+
+        apm_hook_establish(APM_HOOK_SUSPEND, &sc->sc_suspend);
+        apm_hook_establish(APM_HOOK_RESUME , &sc->sc_resume);
+
+       /* Power the system off using APM */
+       EVENTHANDLER_REGISTER(shutdown_final, apm_power_off, NULL,
+                             SHUTDOWN_PRI_LAST);
+
+       sc->initialized = 1;
+
+       dev_ops_add(&apm_ops, 0, 0);
+       make_dev(&apm_ops, 0, UID_ROOT, GID_OPERATOR, 0660, "apm");
+       make_dev(&apm_ops, 8, UID_ROOT, GID_OPERATOR, 0660, "apmctl");
+       return 0;
+}
+
+static int
+apmopen(struct dev_open_args *ap)
+{
+       cdev_t dev = ap->a_head.a_dev;
+       struct apm_softc *sc = &apm_softc;
+       int ctl = APMDEV(dev);
+
+       if (!sc->initialized)
+               return (ENXIO);
+
+       switch (ctl) {
+       case APMDEV_CTL:
+               if (!(ap->a_oflags & FWRITE))
+                       return EINVAL;
+               if (sc->sc_flags & SCFLAG_OCTL)
+                       return EBUSY;
+               sc->sc_flags |= SCFLAG_OCTL;
+               bzero(sc->event_filter, sizeof sc->event_filter);
+               break;
+       case APMDEV_NORMAL:
+               sc->sc_flags |= SCFLAG_ONORMAL;
+               break;
+       default:
+               return ENXIO;
+               break;
+       }
+       return 0;
+}
+
+static int
+apmclose(struct dev_close_args *ap)
+{
+       cdev_t dev = ap->a_head.a_dev;
+       struct apm_softc *sc = &apm_softc;
+       int ctl = APMDEV(dev);
+
+       switch (ctl) {
+       case APMDEV_CTL:
+               apm_lastreq_rejected();
+               sc->sc_flags &= ~SCFLAG_OCTL;
+               bzero(sc->event_filter, sizeof sc->event_filter);
+               break;
+       case APMDEV_NORMAL:
+               sc->sc_flags &= ~SCFLAG_ONORMAL;
+               break;
+       }
+       if ((sc->sc_flags & SCFLAG_OPEN) == 0) {
+               sc->event_count = 0;
+               sc->event_ptr = 0;
+       }
+       return 0;
+}
+
+static int
+apmioctl(struct dev_ioctl_args *ap)
+{
+       cdev_t dev = ap->a_head.a_dev;
+       struct apm_softc *sc = &apm_softc;
+       struct apm_bios_arg *args;
+       int error = 0;
+       int ret;
+       int newstate;
+
+       if (!sc->initialized)
+               return (ENXIO);
+       APM_DPRINT("APM ioctl: cmd = 0x%lx\n", ap->a_cmd);
+
+       switch (ap->a_cmd) {
+       case APMIO_SUSPEND:
+               if (!(ap->a_fflag & FWRITE))
+                       return (EPERM);
+               if (sc->active)
+                       apm_suspend(PMST_SUSPEND);
+               else
+                       error = EINVAL;
+               break;
+
+       case APMIO_STANDBY:
+               if (!(ap->a_fflag & FWRITE))
+                       return (EPERM);
+               if (sc->active)
+                       apm_suspend(PMST_STANDBY);
+               else
+                       error = EINVAL;
+               break;
+
+       case APMIO_GETINFO_OLD:
+               {
+                       struct apm_info info;
+                       apm_info_old_t aiop;
+
+                       if (apm_get_info(&info))
+                               error = ENXIO;
+                       aiop = (apm_info_old_t)ap->a_data;
+                       aiop->ai_major = info.ai_major;
+                       aiop->ai_minor = info.ai_minor;
+                       aiop->ai_acline = info.ai_acline;
+                       aiop->ai_batt_stat = info.ai_batt_stat;
+                       aiop->ai_batt_life = info.ai_batt_life;
+                       aiop->ai_status = info.ai_status;
+               }
+               break;
+       case APMIO_GETINFO:
+               if (apm_get_info((apm_info_t)ap->a_data))
+                       error = ENXIO;
+               break;
+       case APMIO_GETPWSTATUS:
+               if (apm_get_pwstatus((apm_pwstatus_t)ap->a_data))
+                       error = ENXIO;
+               break;
+       case APMIO_ENABLE:
+               if (!(ap->a_fflag & FWRITE))
+                       return (EPERM);
+               apm_event_enable();
+               break;
+       case APMIO_DISABLE:
+               if (!(ap->a_fflag & FWRITE))
+                       return (EPERM);
+               apm_event_disable();
+               break;
+       case APMIO_HALTCPU:
+               if (!(ap->a_fflag & FWRITE))
+                       return (EPERM);
+               apm_halt_cpu();
+               break;
+       case APMIO_NOTHALTCPU:
+               if (!(ap->a_fflag & FWRITE))
+                       return (EPERM);
+               apm_not_halt_cpu();
+               break;
+       case APMIO_DISPLAY:
+               if (!(ap->a_fflag & FWRITE))
+                       return (EPERM);
+               newstate = *(int *)ap->a_data;
+               if (apm_display(newstate))
+                       error = ENXIO;
+               break;
+       case APMIO_BIOS:
+               if (!(ap->a_fflag & FWRITE))
+                       return (EPERM);
+               /* XXX compatibility with the old interface */
+               args = (struct apm_bios_arg *)ap->a_data;
+               sc->bios.r.eax = args->eax;
+               sc->bios.r.ebx = args->ebx;
+               sc->bios.r.ecx = args->ecx;
+               sc->bios.r.edx = args->edx;
+               sc->bios.r.esi = args->esi;
+               sc->bios.r.edi = args->edi;
+               if ((ret = apm_bioscall())) {
+                       /*
+                        * Return code 1 means bios call was unsuccessful.
+                        * Error code is stored in %ah.
+                        * Return code -1 means bios call was unsupported
+                        * in the APM BIOS version.
+                        */
+                       if (ret == -1) {
+                               error = EINVAL;
+                       }
+               } else {
+                       /*
+                        * Return code 0 means bios call was successful.
+                        * We need only %al and can discard %ah.
+                        */
+                       sc->bios.r.eax &= 0xff;
+               }
+               args->eax = sc->bios.r.eax;
+               args->ebx = sc->bios.r.ebx;
+               args->ecx = sc->bios.r.ecx;
+               args->edx = sc->bios.r.edx;
+               args->esi = sc->bios.r.esi;
+               args->edi = sc->bios.r.edi;
+               break;
+       default:
+               error = EINVAL;
+               break;
+       }
+
+       /* for /dev/apmctl */
+       if (APMDEV(dev) == APMDEV_CTL) {
+               struct apm_event_info *evp;
+               int i;
+
+               error = 0;
+               switch (ap->a_cmd) {
+               case APMIO_NEXTEVENT:
+                       if (!sc->event_count) {
+                               error = EAGAIN;
+                       } else {
+                               evp = (struct apm_event_info *)ap->a_data;
+                               i = sc->event_ptr + APM_NEVENTS - sc->event_count;
+                               i %= APM_NEVENTS;
+                               *evp = sc->event_list[i];
+                               sc->event_count--;
+                       }
+                       break;
+               case APMIO_REJECTLASTREQ:
+                       if (apm_lastreq_rejected()) {
+                               error = EINVAL;
+                       }
+                       break;
+               default:
+                       error = EINVAL;
+                       break;
+               }
+       }
+
+       return error;
+}
+
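+/*
+ * Writing an APM event type number to /dev/apmctl toggles whether that
+ * event is recorded for userland consumption (see apm_record_event()).
+ */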
+static int
+apmwrite(struct dev_write_args *ap)
+{
+       cdev_t dev = ap->a_head.a_dev;
+       struct uio *uio = ap->a_uio;
+       struct apm_softc *sc = &apm_softc;
+       u_int event_type;
+       int error;
+       u_char enabled;
+
+       if (APMDEV(dev) != APMDEV_CTL)
+               return(ENODEV);
+       if (uio->uio_resid != sizeof(u_int))
+               return(E2BIG);
+
+       if ((error = uiomove((caddr_t)&event_type, sizeof(u_int), uio)))
+               return(error);
+
+       if (event_type < 0 || event_type >= APM_NPMEV)
+               return(EINVAL);
+
+       if (sc->event_filter[event_type] == 0) {
+               enabled = 1;
+       } else {
+               enabled = 0;
+       }
+       sc->event_filter[event_type] = enabled;
+       APM_DPRINT("apmwrite: event 0x%x %s\n", event_type, is_enabled(enabled));
+
+       return uio->uio_resid;
+}
+
+static int
+apmpoll(struct dev_poll_args *ap)
+{
+       struct apm_softc *sc = &apm_softc;
+       int revents = 0;
+
+       if (ap->a_events & (POLLIN | POLLRDNORM)) {
+               if (sc->event_count) {
+                       revents |= ap->a_events & (POLLIN | POLLRDNORM);
+               } else {
+                       selrecord(curthread, &sc->sc_rsel);
+               }
+       }
+       ap->a_events = revents;
+       return (0);
+}
+
+/*
+ * Because apm is a static device that always exists and is not
+ * discovered by bus scanning, we need an identify function to install
+ * the device so we can probe for it.
+ */
+static device_method_t apm_methods[] = {
+       /* Device interface */
+       DEVMETHOD(device_identify,      bus_generic_identify),
+       DEVMETHOD(device_probe,         apm_probe),
+       DEVMETHOD(device_attach,        apm_attach),
+
+       { 0, 0 }
+};
+
+static driver_t apm_driver = {
+       "apm",
+       apm_methods,
+       1,                      /* no softc (XXX) */
+};
+
+static devclass_t apm_devclass;
+
+DRIVER_MODULE(apm, nexus, apm_driver, apm_devclass, 0, 0);
diff --git a/sys/platform/pc64/apm/apm.h b/sys/platform/pc64/apm/apm.h
new file mode 100644 (file)
index 0000000..41a9c14
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ * APM (Advanced Power Management) BIOS Device Driver
+ *
+ * Copyright (c) 1994 UKAI, Fumitoshi.
+ * Copyright (c) 1994-1995 by HOSOKAWA, Tatsumi <hosokawa@jp.FreeBSD.org>
+ * Copyright (c) 1996 Nate Williams <nate@FreeBSD.org>
+ * Copyright (c) 1997 Poul-Henning Kamp <phk@FreeBSD.org>
+ *
+ * This software may be used, modified, copied, and distributed, in
+ * both source and binary form provided that the above copyright and
+ * these terms are retained. Under no circumstances is the author
+ * responsible for the proper functioning of this software, nor does
+ * the author assume any responsibility for damages incurred with its
+ * use.
+ *
+ * Sep, 1994   Implemented on FreeBSD 1.1.5.1R (Toshiba AVS001WD)
+ *
+ * $FreeBSD: src/sys/i386/apm/apm.h,v 1.1 1999/10/02 03:34:15 nsayer Exp $
+ * $DragonFly: src/sys/platform/pc32/apm/apm.h,v 1.2 2003/06/17 04:28:34 dillon Exp $
+ */
+
+#define APM_NEVENTS 16
+#define APM_NPMEV   13
+
+/* static data */
+struct apm_softc {
+       int     initialized, active, bios_busy;
+       int     always_halt_cpu, slow_idle_cpu;
+       int     disabled, disengaged;
+       int     standby_countdown, suspend_countdown;
+       u_int   minorversion, majorversion;
+       u_int   intversion, connectmode;
+       u_int   standbys, suspends;
+/*     struct bios_args bios;*/
+       struct apmhook sc_suspend;
+       struct apmhook sc_resume;
+       struct selinfo sc_rsel;
+       int     sc_flags;
+       int     event_count;
+       int     event_ptr;
+       struct  apm_event_info event_list[APM_NEVENTS];
+       u_char  event_filter[APM_NPMEV];
+};
index ba0f31f..5a481a1 100644 (file)
@@ -55,8 +55,8 @@
 
 #define ACPI_ASM_MACROS
 #define BREAKPOINT3
-#define ACPI_DISABLE_IRQS() disable_intr()
-#define ACPI_ENABLE_IRQS()  enable_intr()
+#define ACPI_DISABLE_IRQS() cpu_disable_intr()
+#define ACPI_ENABLE_IRQS()  cpu_enable_intr()
 
 #define ACPI_FLUSH_CPU_CACHE() wbinvd()
 
@@ -75,6 +75,5 @@ extern int    acpi_release_global_lock(uint32_t *lock);
 #define COMPILER_DEPENDENT_UINT64      unsigned long
 
 void   acpi_SetDefaultIntrModel(int model);
-void   acpi_cpu_c1(void);
 
 #endif /* _MACHINE_ACPICA_MACHDEP_H__ */
diff --git a/sys/platform/pc64/include/apm_bios.h b/sys/platform/pc64/include/apm_bios.h
new file mode 100644 (file)
index 0000000..a436ad7
--- /dev/null
@@ -0,0 +1,267 @@
+/*
+ * APM (Advanced Power Management) BIOS Device Driver
+ *
+ * Copyright (c) 1994-1995 by HOSOKAWA, Tatsumi <hosokawa@mt.cs.keio.ac.jp>
+ *
+ * This software may be used, modified, copied, and distributed, in
+ * both source and binary form provided that the above copyright and
+ * these terms are retained. Under no circumstances is the author
+ * responsible for the proper functioning of this software, nor does
+ * the author assume any responsibility for damages incurred with its
+ * use.
+ *
+ * Aug, 1994   Implemented on FreeBSD 1.1.5.1R (Toshiba AVS001WD)
+ *
+ * $FreeBSD: src/sys/i386/include/apm_bios.h,v 1.27.2.2 2002/04/12 16:47:00 bmah Exp $
+ * $DragonFly: src/sys/platform/pc32/include/apm_bios.h,v 1.5 2006/05/20 02:42:06 dillon Exp $
+ */
+
+#ifndef        _MACHINE_APM_BIOS_H_
+#define        _MACHINE_APM_BIOS_H_
+
+#ifndef _SYS_TYPES_H_
+#include <sys/types.h>
+#endif
+#ifndef _SYS_IOCCOM_H_
+#include <sys/ioccom.h>
+#endif
+
+/* BIOS id */
+#define APM_BIOS               0x53
+#define APM_INT                        0x15
+
+/* APM flags */
+#define APM_16BIT_SUPPORT      0x01
+#define APM_32BIT_SUPPORT      0x02
+#define APM_CPUIDLE_SLOW       0x04
+#define APM_DISABLED           0x08
+#define APM_DISENGAGED         0x10
+
+/* APM initializer physical address */
+#define APM_OURADDR            0x00080000
+
+/* APM functions */
+#define APM_INSTCHECK          0x00
+#define APM_REALCONNECT                0x01
+#define APM_PROT16CONNECT      0x02
+#define APM_PROT32CONNECT      0x03
+#define APM_DISCONNECT         0x04
+#define APM_CPUIDLE            0x05
+#define APM_CPUBUSY            0x06
+#define APM_SETPWSTATE         0x07
+#define APM_ENABLEDISABLEPM    0x08
+#define APM_RESTOREDEFAULT     0x09
+#define        APM_GETPWSTATUS         0x0a
+#define APM_GETPMEVENT         0x0b
+#define APM_GETPWSTATE         0x0c
+#define APM_ENABLEDISABLEDPM   0x0d
+#define APM_DRVVERSION         0x0e
+#define APM_ENGAGEDISENGAGEPM  0x0f
+#define APM_GETCAPABILITIES    0x10
+#define APM_RESUMETIMER                0x11
+#define APM_RESUMEONRING       0x12
+#define APM_TIMERREQUESTS      0x13
+#define APM_OEMFUNC            0x80
+
+/* error code */
+#define APME_OK                        0x00
+#define APME_PMDISABLED                0x01
+#define APME_REALESTABLISHED   0x02
+#define APME_NOTCONNECTED      0x03
+#define APME_PROT16ESTABLISHED 0x05
+#define APME_PROT16NOTSUPPORTED        0x06
+#define APME_PROT32ESTABLISHED 0x07
+#define APME_PROT32NOTDUPPORTED        0x08
+#define APME_UNKNOWNDEVICEID   0x09
+#define APME_OUTOFRANGE                0x0a
+#define APME_NOTENGAGED                0x0b
+#define APME_CANTENTERSTATE    0x60
+#define APME_NOPMEVENT         0x80
+#define APME_NOAPMPRESENT      0x86
+
+
+/* device code */
+#define PMDV_APMBIOS           0x0000
+#define PMDV_ALLDEV            0x0001
+#define PMDV_DISP0             0x0100
+#define PMDV_DISP1             0x0101
+#define PMDV_DISPALL           0x01ff
+#define PMDV_2NDSTORAGE0       0x0200
+#define PMDV_2NDSTORAGE1       0x0201
+#define PMDV_2NDSTORAGE2       0x0202
+#define PMDV_2NDSTORAGE3       0x0203
+#define PMDV_PARALLEL0         0x0300
+#define PMDV_PARALLEL1         0x0301
+#define PMDV_SERIAL0           0x0400
+#define PMDV_SERIAL1           0x0401
+#define PMDV_SERIAL2           0x0402
+#define PMDV_SERIAL3           0x0403
+#define PMDV_SERIAL4           0x0404
+#define PMDV_SERIAL5           0x0405
+#define PMDV_SERIAL6           0x0406
+#define PMDV_SERIAL7           0x0407
+#define PMDV_NET0              0x0500
+#define PMDV_NET1              0x0501
+#define PMDV_NET2              0x0502
+#define PMDV_NET3              0x0503
+#define PMDV_PCMCIA0           0x0600
+#define PMDV_PCMCIA1           0x0601
+#define PMDV_PCMCIA2           0x0602
+#define PMDV_PCMCIA3           0x0603
+/* 0x0700 - 0x7fff     Reserved                        */
+#define PMDV_BATT_BASE         0x8000
+#define PMDV_BATT0             0x8001
+#define PMDV_BATT1             0x8002
+#define PMDV_BATT_ALL          0x80ff
+/* 0x8100 - 0xdfff     Reserved                        */
+/* 0xe000 - 0xefff     OEM-defined power device IDs    */
+/* 0xf000 - 0xffff     Reserved                        */
+
+/* Power state */
+#define PMST_APMENABLED                0x0000
+#define PMST_STANDBY           0x0001
+#define PMST_SUSPEND           0x0002
+#define PMST_OFF               0x0003
+#define PMST_LASTREQNOTIFY     0x0004
+#define PMST_LASTREQREJECT     0x0005
+/* 0x0006 - 0x001f     Reserved system states          */
+/* 0x0020 - 0x003f     OEM-defined system states       */
+/* 0x0040 - 0x007f     OEM-defined device states       */
+/* 0x0080 - 0xffff     Reserved device states          */
+
+#if !defined(ASSEMBLER) && !defined(INITIALIZER)
+
+/* C definitions */
+struct apmhook {
+       struct apmhook  *ah_next;
+       int             (*ah_fun) (void *ah_arg);
+       void            *ah_arg;
+       const char      *ah_name;
+       int             ah_order;
+};
+#define APM_HOOK_NONE          (-1)
+#define APM_HOOK_SUSPEND        0
+#define APM_HOOK_RESUME         1
+#define NAPM_HOOK               2
+
+#ifdef _KERNEL
+
+void apm_suspend(int state);
+struct apmhook *apm_hook_establish (int apmh, struct apmhook *);
+void apm_hook_disestablish (int apmh, struct apmhook *);
+void apm_cpu_idle(void);
+void apm_cpu_busy(void);
+
+#endif
+
+#endif /* !ASSEMBLER && !INITIALIZER */
+
+#define APM_MIN_ORDER          0x00
+#define APM_MID_ORDER          0x80
+#define APM_MAX_ORDER          0xff
+
+/* power management event code */
+#define PMEV_NOEVENT           0x0000
+#define PMEV_STANDBYREQ                0x0001
+#define PMEV_SUSPENDREQ                0x0002
+#define PMEV_NORMRESUME                0x0003
+#define PMEV_CRITRESUME                0x0004
+#define PMEV_BATTERYLOW                0x0005
+#define PMEV_POWERSTATECHANGE  0x0006
+#define PMEV_UPDATETIME                0x0007
+#define PMEV_CRITSUSPEND       0x0008
+#define PMEV_USERSTANDBYREQ    0x0009
+#define PMEV_USERSUSPENDREQ    0x000a
+#define PMEV_STANDBYRESUME     0x000b
+#define PMEV_CAPABILITIESCHANGE 0x000c
+/* 0x000d - 0x00ff     Reserved system events  */
+/* 0x0100 - 0x01ff     Reserved device events  */
+/* 0x0200 - 0x02ff     OEM-defined APM events  */
+/* 0x0300 - 0xffff     Reserved                */
+#define PMEV_DEFAULT           0xffffffff      /* used for customization */
+
+#if !defined(ASSEMBLER) && !defined(INITIALIZER)
+
+/*
+ * Old apm_info structure, returned by the APMIO_GETINFO_OLD ioctl.  This
+ * is for backward compatibility with old executables.
+ */
+typedef struct apm_info_old {
+       u_int   ai_major;       /* APM major version */
+       u_int   ai_minor;       /* APM minor version */
+       u_int   ai_acline;      /* AC line status */
+       u_int   ai_batt_stat;   /* Battery status */
+       u_int   ai_batt_life;   /* Remaining battery life */
+       u_int   ai_status;      /* Status of APM support (enabled/disabled) */
+} *apm_info_old_t;
+
+/*
+ * Structure returned by the APMIO_GETINFO ioctl.
+ *
+ * In the comments below, the parenthesized numbers indicate the minimum
+ * value of ai_infoversion for which each field is valid.
+ */
+typedef struct apm_info {
+       u_int   ai_infoversion; /* Indicates which fields are valid */
+       u_int   ai_major;       /* APM major version (0) */
+       u_int   ai_minor;       /* APM minor version (0) */
+       u_int   ai_acline;      /* AC line status (0) */
+       u_int   ai_batt_stat;   /* Battery status (0) */
+       u_int   ai_batt_life;   /* Remaining battery life in percent (0) */
+       int     ai_batt_time;   /* Remaining battery time in seconds (0) */
+       u_int   ai_status;      /* True if enabled (0) */
+       u_int   ai_batteries;   /* Number of batteries (1) */
+       u_int   ai_capabilities;/* APM Capabilities (1) */
+       u_int   ai_spare[6];    /* For future expansion */
+} *apm_info_t;
+
+/* Battery flag */
+#define APM_BATT_HIGH          0x01
+#define APM_BATT_LOW           0x02
+#define APM_BATT_CRITICAL      0x04
+#define APM_BATT_CHARGING      0x08
+#define APM_BATT_NOT_PRESENT   0x10
+#define APM_BATT_NO_SYSTEM     0x80
+
+typedef struct apm_pwstatus {
+       u_int   ap_device;      /* Device code of battery */
+       u_int   ap_acline;      /* AC line status (0) */
+       u_int   ap_batt_stat;   /* Battery status (0) */
+       u_int   ap_batt_flag;   /* Battery flag (0) */
+       u_int   ap_batt_life;   /* Remaining battery life in percent (0) */
+       int     ap_batt_time;   /* Remaining battery time in seconds (0) */
+} *apm_pwstatus_t;
+
+struct apm_bios_arg {
+        u_long eax;
+        u_long ebx;
+        u_long ecx;
+        u_long edx;
+        u_long esi;
+        u_long edi;
+};
+
+struct apm_event_info {
+       u_int type;
+       u_int index;
+       u_int spare[8];
+};
+
+#define APMIO_SUSPEND          _IO('P', 1)
+#define APMIO_GETINFO_OLD      _IOR('P', 2, struct apm_info_old)
+#define APMIO_ENABLE           _IO('P', 5)
+#define APMIO_DISABLE          _IO('P', 6)
+#define APMIO_HALTCPU          _IO('P', 7)
+#define APMIO_NOTHALTCPU       _IO('P', 8)
+#define APMIO_DISPLAY          _IOW('P', 9, int)
+#define APMIO_BIOS             _IOWR('P', 10, struct apm_bios_arg)
+#define APMIO_GETINFO          _IOR('P', 11, struct apm_info)
+#define APMIO_STANDBY          _IO('P', 12)
+#define APMIO_GETPWSTATUS      _IOWR('P', 13, struct apm_pwstatus)
+/* for /dev/apmctl */
+#define APMIO_NEXTEVENT                _IOR('A', 100, struct apm_event_info)
+#define APMIO_REJECTLASTREQ    _IO('P', 101)
+
+#endif /* !ASSEMBLER && !INITIALIZER */
+
+#endif /* !_MACHINE_APM_BIOS_H_ */
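
For reference, the ioctls above are consumed from userland roughly as
follows.  The /dev/apm path is an assumption carried over from pc32 (it is
what apm(8)/apmd(8) open there) and is not defined by this header:

#include <sys/types.h>
#include <sys/ioctl.h>
#include <machine/apm_bios.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
        struct apm_info ai;
        int fd;

        fd = open("/dev/apm", O_RDONLY);        /* assumed device node */
        if (fd < 0)
                return (1);
        if (ioctl(fd, APMIO_GETINFO, &ai) < 0) {
                close(fd);
                return (1);
        }
        printf("APM %u.%u, AC line %u, battery %u%%\n",
            ai.ai_major, ai.ai_minor, ai.ai_acline, ai.ai_batt_life);
        close(fd);
        return (0);
}
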
diff --git a/sys/platform/pc64/include/apm_segments.h b/sys/platform/pc64/include/apm_segments.h
new file mode 100644 (file)
index 0000000..48bae25
--- /dev/null
@@ -0,0 +1,39 @@
+/*
+ * LP (Laptop Package)
+ *
+ * Copyright (C) 1994 by HOSOKAWA Tatsumi <hosokawa@mt.cs.keio.ac.jp>
+ *
+ * This software may be used, modified, copied, and distributed, in
+ * both source and binary form provided that the above copyright and
+ * these terms are retained. Under no circumstances is the author
+ * responsible for the proper functioning of this software, nor does
+ * the author assume any responsibility for damages incurred with its
+ * use.
+ *
+ * Sep., 1994  Implemented on FreeBSD 1.1.5.1R (Toshiba AVS001WD)
+ *
+ * $FreeBSD: src/sys/i386/include/apm_segments.h,v 1.8 1999/08/28 00:44:05 peter Exp $
+ * $DragonFly: src/sys/platform/pc32/include/apm_segments.h,v 1.3 2003/06/28 04:16:03 dillon Exp $
+ */
+
+#ifndef _MACHINE_APM_SEGMENTS_H_
+#define _MACHINE_APM_SEGMENTS_H_
+
+#define SIZEOF_GDT             8
+#define BOOTSTRAP_GDT_NUM      32
+
+#define APM_INIT_CS_INDEX      (BOOTSTRAP_GDT_NUM - 4)
+#define APM_INIT_DS_INDEX      (BOOTSTRAP_GDT_NUM - 3)
+#define APM_INIT_CS16_INDEX    (BOOTSTRAP_GDT_NUM - 2)
+#define APM_INIT_DS16_INDEX    (BOOTSTRAP_GDT_NUM - 1)
+#define APM_INIT_CS_SEL                (APM_INIT_CS_INDEX << 3)
+#define APM_INIT_DS_SEL                (APM_INIT_DS_INDEX << 3)
+#define APM_INIT_CS16_SEL      (APM_INIT_CS16_INDEX << 3)
+#define APM_INIT_DS16_SEL      (APM_INIT_DS16_INDEX << 3)
+
+#define CS32_ATTRIB            0x409e
+#define DS32_ATTRIB            0x4092
+#define CS16_ATTRIB            0x009e
+#define DS16_ATTRIB            0x0092
+
+#endif
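
The *_SEL values are simply the GDT indices shifted left by three: the low
three bits of an x86 segment selector hold the table indicator and RPL,
which are zero for these kernel-installed entries.  A standalone check of
that arithmetic, illustrative only:

#include <assert.h>
#include <machine/apm_segments.h>

int
main(void)
{
        /* APM_INIT_CS_INDEX is BOOTSTRAP_GDT_NUM - 4 == 28, so 28 << 3 == 0xe0 */
        assert(APM_INIT_CS_SEL == (BOOTSTRAP_GDT_NUM - 4) << 3);
        assert(APM_INIT_DS16_SEL == (BOOTSTRAP_GDT_NUM - 1) << 3);
        return (0);
}
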
diff --git a/sys/platform/pc64/include/cpufreq.h b/sys/platform/pc64/include/cpufreq.h
new file mode 100644 (file)
index 0000000..1bb0944
--- /dev/null
@@ -0,0 +1,22 @@
+#ifndef _MACHINE_CPUFREQ_H_
+#define _MACHINE_CPUFREQ_H_
+
+struct amd0f_fidvid {
+       uint32_t        fid;
+       uint32_t        vid;
+};
+
+struct amd0f_xsit {
+       uint32_t        rvo;
+       uint32_t        mvs;
+       uint32_t        vst;
+       uint32_t        pll_time;
+       uint32_t        irt;
+};
+
+void   amd0f_fidvid_limit(struct amd0f_fidvid *, struct amd0f_fidvid *);
+int    amd0f_set_fidvid(const struct amd0f_fidvid *,
+           const struct amd0f_xsit *);
+int    amd0f_get_fidvid(struct amd0f_fidvid *);
+
+#endif /* !_MACHINE_CPUFREQ_H_ */
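
These helpers wrap the AMD family-0Fh FID/VID transition for the P-state
code; their exact contract is defined by the implementation elsewhere in
the tree, not by this header.  The call pattern below (query the allowed
range, clamp the request, then program the transition with the
ACPI-supplied timing parameters) is therefore an assumption about intent,
not documented behavior:

static int
example_program_pstate(uint32_t want_fid, uint32_t want_vid,
    const struct amd0f_xsit *xsit)
{
        struct amd0f_fidvid fv_min, fv_max, target;

        /* Assumed to report the lowest/highest FID+VID the CPU accepts. */
        amd0f_fidvid_limit(&fv_min, &fv_max);

        target.fid = want_fid;
        target.vid = want_vid;
        if (target.fid > fv_max.fid)
                target.fid = fv_max.fid;
        if (target.fid < fv_min.fid)
                target.fid = fv_min.fid;
        /* VID selection is tied to the chosen FID by the driver's tables,
           so it is left untouched in this sketch. */

        return (amd0f_set_fidvid(&target, xsit));
}
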
index 288c1ea..fb208f3 100644 (file)
@@ -43,6 +43,7 @@
 extern u_long  atdevbase;      /* offset in virtual memory of ISA io mem */
 extern u_int   basemem;
 extern int     busdma_swi_pending;
+extern void    (*cpu_idle_hook)(void);
 extern u_int   cpu_exthigh;
 extern u_int   amd_feature;
 extern u_int   amd_feature2;
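
cpu_idle_hook is exported here so a power-management driver can substitute
its own idle routine for the default halt; that is what lets apm(4) route
idle time through the APM BIOS.  A hypothetical fragment of that wiring
(the real assignment lives in the driver's attach path):

#include <machine/md_var.h>
#include <machine/apm_bios.h>

static void
example_install_apm_idle(void)
{
        /* Route the idle loop through the APM "CPU idle" BIOS call. */
        cpu_idle_hook = apm_cpu_idle;
}
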