/*-
 * Copyright (c) 2003 Nate Lawson (SDG)
 * Copyright (c) 2001 Michael Smith
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/acpica/acpi_cpu.c,v 1.29 2003/12/28 22:15:24 njl Exp $
 * $DragonFly: src/sys/dev/acpica5/acpi_cpu.c,v 1.2 2004/05/05 22:19:24 dillon Exp $
 */
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/globaldata.h>
#include <sys/power.h>
#include <sys/proc.h>
#include <sys/sbuf.h>

#include <bus/pci/pcivar.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/globaldata.h>
#include <machine/smp.h>
#include <sys/rman.h>

#include "acpi.h"
#include "acpivar.h"
/*
 * Support for ACPI Processor devices, including ACPI 2.0 throttling
 * and C[1-3] sleep states.
 *
 * TODO: implement scans of all CPUs to be sure all Cx states are
 * equivalent.
 */
/* Hooks for the ACPI CA debugging infrastructure */
#define _COMPONENT	ACPI_PROCESSOR
ACPI_MODULE_NAME("PROCESSOR")
struct acpi_cx {
    struct resource	*p_lvlx;	/* Register to read to enter state. */
    uint32_t		 type;		/* C1-3 (C4 and up treated as C3). */
    uint32_t		 trans_lat;	/* Transition latency (usec). */
    uint32_t		 power;		/* Power consumed (mW). */
};
#define MAX_CX_STATES	 8
struct acpi_cx_stats {
    int			 long_slp;	/* Count of sleeps >= trans_lat. */
    int			 short_slp;	/* Count of sleeps < trans_lat. */
};
struct acpi_cpu_softc {
    device_t		 cpu_dev;
    ACPI_HANDLE		 cpu_handle;
    uint32_t		 acpi_id;	/* ACPI processor id */
    uint32_t		 cpu_p_blk;	/* ACPI P_BLK location */
    uint32_t		 cpu_p_blk_len;	/* P_BLK length (must be 6). */
    struct resource	*cpu_p_cnt;	/* Throttling control register */
    struct acpi_cx	 cpu_cx_states[MAX_CX_STATES];
    int			 cpu_cx_count;	/* Number of valid Cx states. */
};
#define CPU_GET_REG(reg, width)						\
    (bus_space_read_ ## width(rman_get_bustag((reg)),			\
			      rman_get_bushandle((reg)), 0))
#define CPU_SET_REG(reg, width, val)					\
    (bus_space_write_ ## width(rman_get_bustag((reg)),			\
			       rman_get_bushandle((reg)), 0, (val)))
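
/*
 * Usage note: the "width" argument is the access size in bytes and is
 * pasted into the bus_space call, e.g. CPU_GET_REG(sc->cpu_p_cnt, 4)
 * expands to a bus_space_read_4() of the P_CNT register.
 */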
/*
 * Speeds are stored in counts, from 1 to CPU_MAX_SPEED, and
 * reported to the user in tenths of a percent.
 */
static uint32_t	cpu_duty_offset;
static uint32_t	cpu_duty_width;
#define CPU_MAX_SPEED		(1 << cpu_duty_width)
#define CPU_SPEED_PERCENT(x)	((1000 * (x)) / CPU_MAX_SPEED)
#define CPU_SPEED_PRINTABLE(x)	(CPU_SPEED_PERCENT(x) / 10),	\
				(CPU_SPEED_PERCENT(x) % 10)
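
/*
 * Worked example: with a 3-bit duty width, CPU_MAX_SPEED is 1 << 3 = 8,
 * so a throttle count of 4 gives CPU_SPEED_PERCENT(4) = 500, which
 * CPU_SPEED_PRINTABLE() splits into "50" and "0" for printing as 50.0%.
 */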
#define CPU_P_CNT_THT_EN	(1<<4)
#define PM_USEC(x)		((x) >> 2)	/* ~4 clocks per usec (3.57955 MHz) */
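
/*
 * Worked example: the PM timer ticks at 3.579545 MHz, roughly 4 ticks per
 * microsecond, so the shift approximates ticks-to-usec without a divide:
 * PM_USEC(4000) = 1000, i.e. 4000 ticks is treated as ~1 ms.
 */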
#define ACPI_CPU_NOTIFY_PERF_STATES	0x80	/* _PSS changed. */
#define ACPI_CPU_NOTIFY_CX_STATES	0x81	/* _CST changed. */
#define CPU_QUIRK_NO_C3		0x0001	/* C3-type states are not usable. */
#define CPU_QUIRK_NO_THROTTLE	0x0002	/* Throttling is not usable. */
#define PCI_VENDOR_INTEL	0x8086
#define PCI_DEVICE_82371AB_3	0x7113	/* PIIX4 chipset for quirks. */
#define PCI_REVISION_A_STEP	0
#define PCI_REVISION_B_STEP	1
#define PCI_REVISION_4E		2
#define PCI_REVISION_4M		3
/* Platform hardware resource information. */
static uint32_t	 cpu_smi_cmd;	/* Value to write to SMI_CMD. */
static uint8_t	 cpu_pstate_cnt;/* Register to take over throttling. */
static uint8_t	 cpu_cst_cnt;	/* Indicate we are _CST aware. */
static uint32_t	 cpu_rid;	/* Driver-wide resource id. */
static uint32_t	 cpu_quirks;	/* Indicate any hardware bugs. */
/* Runtime state. */
static int	 cpu_cx_count;	/* Number of valid states */
static uint32_t	 cpu_cx_next;	/* State to use for next sleep. */
static uint32_t	 cpu_non_c3;	/* Index of lowest non-C3 state. */
static struct acpi_cx_stats cpu_cx_stats[MAX_CX_STATES];
static int	 cpu_idle_busy;	/* Count of CPUs in acpi_cpu_idle. */
/* Values for sysctl. */
static uint32_t	 cpu_throttle_state;
static uint32_t	 cpu_throttle_max;
static int	 cpu_cx_lowest;
static char	 cpu_cx_supported[64];
static device_t	*cpu_devices;
static int	 cpu_ndevices;
static struct acpi_cpu_softc **cpu_softc;
static struct sysctl_ctx_list	acpi_cpu_sysctl_ctx;
static struct sysctl_oid	*acpi_cpu_sysctl_tree;
static int	acpi_cpu_probe(device_t dev);
static int	acpi_cpu_attach(device_t dev);
static int	acpi_pcpu_get_id(uint32_t idx, uint32_t *acpi_id,
				 uint32_t *cpu_id);
static int	acpi_cpu_shutdown(device_t dev);
static int	acpi_cpu_throttle_probe(struct acpi_cpu_softc *sc);
static int	acpi_cpu_cx_probe(struct acpi_cpu_softc *sc);
static int	acpi_cpu_cx_cst(struct acpi_cpu_softc *sc);
static void	acpi_cpu_startup(void *arg);
static void	acpi_cpu_startup_throttling(void);
static void	acpi_cpu_startup_cx(void);
static void	acpi_cpu_throttle_set(uint32_t speed);
static void	acpi_cpu_idle(void);
static void	acpi_cpu_c1(void);
static void	acpi_pm_ticksub(uint32_t *end, const uint32_t *start);
static void	acpi_cpu_notify(ACPI_HANDLE h, UINT32 notify, void *context);
static int	acpi_cpu_quirks(struct acpi_cpu_softc *sc);
static int	acpi_cpu_throttle_sysctl(SYSCTL_HANDLER_ARGS);
static int	acpi_cpu_history_sysctl(SYSCTL_HANDLER_ARGS);
static int	acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS);
static device_method_t acpi_cpu_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe,	acpi_cpu_probe),
    DEVMETHOD(device_attach,	acpi_cpu_attach),
    DEVMETHOD(device_shutdown,	acpi_cpu_shutdown),

    {0, 0}
};
static driver_t acpi_cpu_driver = {
    "cpu",
    acpi_cpu_methods,
    sizeof(struct acpi_cpu_softc),
};
static devclass_t acpi_cpu_devclass;
DRIVER_MODULE(acpi_cpu, acpi, acpi_cpu_driver, acpi_cpu_devclass, 0, 0);
static int
acpi_cpu_probe(device_t dev)
{
    if (!acpi_disabled("cpu") && acpi_get_type(dev) == ACPI_TYPE_PROCESSOR) {
	device_set_desc(dev, "CPU");
	if (cpu_softc == NULL)
	    cpu_softc = malloc(sizeof(struct acpi_cpu_softc *) *
		SMP_MAXCPU, M_TEMP /* XXX */, M_INTWAIT | M_ZERO);
	return (0);
    }

    return (ENXIO);
}
static int
acpi_cpu_attach(device_t dev)
{
    struct acpi_cpu_softc *sc;
    struct acpi_softc	  *acpi_sc;
    ACPI_OBJECT		   pobj;
    ACPI_BUFFER		   buf;
    ACPI_STATUS		   status;
    uint32_t		   cpu_id;
    int			   thr_ret, cx_ret;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    sc = device_get_softc(dev);
    sc->cpu_dev = dev;
    sc->cpu_handle = acpi_get_handle(dev);
    /* Get our Processor object. */
    buf.Pointer = &pobj;
    buf.Length = sizeof(pobj);
    status = AcpiEvaluateObject(sc->cpu_handle, NULL, NULL, &buf);
    if (ACPI_FAILURE(status)) {
	device_printf(dev, "Couldn't get Processor object - %s\n",
		      AcpiFormatException(status));
	return_VALUE (ENXIO);
    }
    if (pobj.Type != ACPI_TYPE_PROCESSOR) {
	device_printf(dev, "Processor object has bad type %d\n", pobj.Type);
	return_VALUE (ENXIO);
    }
    /*
     * Find the processor associated with our unit.  We could use the
     * ProcId as a key; however, some boxes do not have the same values
     * in their Processor object as the ProcId values in the MADT.
     */
    sc->acpi_id = pobj.Processor.ProcId;
    if (acpi_pcpu_get_id(device_get_unit(dev), &sc->acpi_id, &cpu_id) != 0)
	return_VALUE (ENXIO);

    /*
     * Check if we already probed this processor.  We scan the bus twice
     * so it's possible we've already seen this one.
     */
    if (cpu_softc[cpu_id] != NULL)
	return_VALUE (ENXIO);
    cpu_softc[cpu_id] = sc;
    /* Get various global values from the Processor object. */
    sc->cpu_p_blk = pobj.Processor.PblkAddress;
    sc->cpu_p_blk_len = pobj.Processor.PblkLength;
    ACPI_DEBUG_PRINT((ACPI_DB_INFO, "acpi_cpu%d: P_BLK at %#x/%d\n",
		     device_get_unit(dev), sc->cpu_p_blk, sc->cpu_p_blk_len));
    acpi_sc = acpi_device_get_parent_softc(dev);
    sysctl_ctx_init(&acpi_cpu_sysctl_ctx);
    acpi_cpu_sysctl_tree = SYSCTL_ADD_NODE(&acpi_cpu_sysctl_ctx,
				SYSCTL_CHILDREN(acpi_sc->acpi_sysctl_tree),
				OID_AUTO, "cpu", CTLFLAG_RD, 0, "");
    /* If this is the first device probed, check for quirks. */
    if (device_get_unit(dev) == 0)
	acpi_cpu_quirks(sc);

    /*
     * Probe for throttling and Cx state support.
     * If none of these is present, free up unused resources.
     */
    thr_ret = acpi_cpu_throttle_probe(sc);
    cx_ret = acpi_cpu_cx_probe(sc);
    if (thr_ret == 0 || cx_ret == 0) {
	status = AcpiInstallNotifyHandler(sc->cpu_handle, ACPI_DEVICE_NOTIFY,
					  acpi_cpu_notify, sc);
	if (device_get_unit(dev) == 0)
	    AcpiOsQueueForExecution(OSD_PRIORITY_LO, acpi_cpu_startup, NULL);
    } else {
	sysctl_ctx_free(&acpi_cpu_sysctl_ctx);
    }

    return_VALUE (0);
}
/*
 * Find the nth present CPU and return its pc_cpuid as well as set the
 * pc_acpi_id from the most reliable source.
 */
static int
acpi_pcpu_get_id(uint32_t idx, uint32_t *acpi_id, uint32_t *cpu_id)
{
    struct mdglobaldata *md;
    uint32_t		 i;

    KASSERT(acpi_id != NULL, ("Null acpi_id"));
    KASSERT(cpu_id != NULL, ("Null cpu_id"));
    for (i = 0; i < ncpus; i++) {
	if ((smp_active_mask & (1 << i)) == 0)
	    continue;
	md = (struct mdglobaldata *)globaldata_find(i);
	KASSERT(md != NULL, ("no pcpu data for %d", i));
	if (idx-- == 0) {
	    /*
	     * If pc_acpi_id was not initialized (e.g., a non-APIC UP box)
	     * override it with the value from the ASL.  Otherwise, if the
	     * two don't match, prefer the MADT-derived value.  Finally,
	     * return the pc_cpuid to reference this processor.
	     */
	    if (md->gd_acpi_id == 0xffffffff)
		md->gd_acpi_id = *acpi_id;
	    else if (md->gd_acpi_id != *acpi_id)
		*acpi_id = md->gd_acpi_id;
	    *cpu_id = md->mi.gd_cpuid;
	    return (0);
	}
    }

    return (ESRCH);
}
static int
acpi_cpu_shutdown(device_t dev)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* Disable any entry to the idle function. */
    cpu_cx_count = 0;

    /* Wait for all processors to exit acpi_cpu_idle(). */
    /*smp_rendezvous(NULL, NULL, NULL, NULL);*/
    KKASSERT(0);	/* XXX use rendezvous */
    while (cpu_idle_busy > 0)
	DELAY(1);

    return_VALUE (0);
}
static int
acpi_cpu_throttle_probe(struct acpi_cpu_softc *sc)
{
    uint32_t		 duty_end;
    ACPI_BUFFER		 buf;
    ACPI_OBJECT		 obj;
    ACPI_GENERIC_ADDRESS gas;
    ACPI_STATUS		 status;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* Get throttling parameters from the FADT.  0 means not supported. */
    if (device_get_unit(sc->cpu_dev) == 0) {
	cpu_smi_cmd = AcpiGbl_FADT->SmiCmd;
	cpu_pstate_cnt = AcpiGbl_FADT->PstateCnt;
	cpu_cst_cnt = AcpiGbl_FADT->CstCnt;
	cpu_duty_offset = AcpiGbl_FADT->DutyOffset;
	cpu_duty_width = AcpiGbl_FADT->DutyWidth;
    }
    if (cpu_duty_width == 0 || (cpu_quirks & CPU_QUIRK_NO_THROTTLE) != 0)
	return (ENXIO);

    /* Validate the duty offset/width. */
    duty_end = cpu_duty_offset + cpu_duty_width - 1;
    if (duty_end > 31) {
	device_printf(sc->cpu_dev, "CLK_VAL field overflows P_CNT register\n");
	return (ENXIO);
    }
    if (cpu_duty_offset <= 4 && duty_end >= 4) {
	device_printf(sc->cpu_dev, "CLK_VAL field overlaps THT_EN bit\n");
	return (ENXIO);
    }
    /*
     * If not present, fall back to using the processor's P_BLK to find
     * the P_CNT register.
     *
     * Note that some systems seem to duplicate the P_BLK pointer
     * across multiple CPUs, so not getting the resource is not fatal.
     */
    buf.Pointer = &obj;
    buf.Length = sizeof(obj);
    status = AcpiEvaluateObject(sc->cpu_handle, "_PTC", NULL, &buf);
    if (ACPI_SUCCESS(status)) {
	if (obj.Buffer.Length < sizeof(ACPI_GENERIC_ADDRESS) + 3) {
	    device_printf(sc->cpu_dev, "_PTC buffer too small\n");
	    return (ENXIO);
	}
	memcpy(&gas, obj.Buffer.Pointer + 3, sizeof(gas));
	sc->cpu_p_cnt = acpi_bus_alloc_gas(sc->cpu_dev, &cpu_rid, &gas);
	if (sc->cpu_p_cnt != NULL) {
	    ACPI_DEBUG_PRINT((ACPI_DB_INFO, "acpi_cpu%d: P_CNT from _PTC\n",
			     device_get_unit(sc->cpu_dev)));
	}
    }
    /* If _PTC not present or other failure, try the P_BLK. */
    if (sc->cpu_p_cnt == NULL) {
	/* The spec says P_BLK must be at least 6 bytes long. */
	if (sc->cpu_p_blk == 0 || sc->cpu_p_blk_len != 6)
	    return (ENXIO);
	gas.Address = sc->cpu_p_blk;
	gas.AddressSpaceId = ACPI_ADR_SPACE_SYSTEM_IO;
	gas.RegisterBitWidth = 32;
	sc->cpu_p_cnt = acpi_bus_alloc_gas(sc->cpu_dev, &cpu_rid, &gas);
	if (sc->cpu_p_cnt != NULL) {
	    ACPI_DEBUG_PRINT((ACPI_DB_INFO, "acpi_cpu%d: P_CNT from P_BLK\n",
			     device_get_unit(sc->cpu_dev)));
	} else {
	    device_printf(sc->cpu_dev, "Failed to attach throttling P_CNT\n");
	    return (ENXIO);
	}
    }

    return (0);
}
static int
acpi_cpu_cx_probe(struct acpi_cpu_softc *sc)
{
    ACPI_GENERIC_ADDRESS gas;
    struct acpi_cx	*cx_ptr;
    int			 error;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* Bus mastering arbitration control is needed for C3. */
    if (AcpiGbl_FADT->V1_Pm2CntBlk == 0 || AcpiGbl_FADT->Pm2CntLen == 0) {
	cpu_quirks |= CPU_QUIRK_NO_C3;
	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			 "acpi_cpu%d: No BM control, C3 disabled\n",
			 device_get_unit(sc->cpu_dev)));
    }

    /*
     * First, check for the ACPI 2.0 _CST sleep states object.
     * If not usable, fall back to the P_BLK's P_LVL2 and P_LVL3.
     */
    sc->cpu_cx_count = 0;
    error = acpi_cpu_cx_cst(sc);
    if (error != 0) {
	cx_ptr = sc->cpu_cx_states;

	/* C1 has been required since just after ACPI 1.0 */
	cx_ptr->type = ACPI_STATE_C1;
	cx_ptr->trans_lat = 0;
	cx_ptr++;
	sc->cpu_cx_count++;

	/* The spec says P_BLK must be 6 bytes long. */
	if (sc->cpu_p_blk_len != 6)
	    goto done;

	/* Validate and allocate resources for C2 (P_LVL2). */
	gas.AddressSpaceId = ACPI_ADR_SPACE_SYSTEM_IO;
	gas.RegisterBitWidth = 8;
	if (AcpiGbl_FADT->Plvl2Lat < 100) {
	    gas.Address = sc->cpu_p_blk + 4;
	    cx_ptr->p_lvlx = acpi_bus_alloc_gas(sc->cpu_dev, &cpu_rid, &gas);
	    if (cx_ptr->p_lvlx != NULL) {
		cx_ptr->type = ACPI_STATE_C2;
		cx_ptr->trans_lat = AcpiGbl_FADT->Plvl2Lat;
		cx_ptr++;
		sc->cpu_cx_count++;
	    }
	}

	/* Validate and allocate resources for C3 (P_LVL3). */
	if (AcpiGbl_FADT->Plvl3Lat < 1000 &&
	    (cpu_quirks & CPU_QUIRK_NO_C3) == 0) {

	    gas.Address = sc->cpu_p_blk + 5;
	    cx_ptr->p_lvlx = acpi_bus_alloc_gas(sc->cpu_dev, &cpu_rid, &gas);
	    if (cx_ptr->p_lvlx != NULL) {
		cx_ptr->type = ACPI_STATE_C3;
		cx_ptr->trans_lat = AcpiGbl_FADT->Plvl3Lat;
		cx_ptr++;
		sc->cpu_cx_count++;
	    }
	}
    }

done:
    /* If no valid registers were found, don't attach. */
    if (sc->cpu_cx_count == 0)
	return (ENXIO);

    return (0);
}
/*
 * Parse a _CST package and set up its Cx states.  Since the _CST object
 * can change dynamically, our notify handler may call this function
 * to clean up and probe the new _CST package.
 */
static int
acpi_cpu_cx_cst(struct acpi_cpu_softc *sc)
{
    struct acpi_cx	*cx_ptr;
    ACPI_STATUS		 status;
    ACPI_BUFFER		 buf;
    ACPI_OBJECT		*top;
    ACPI_OBJECT		*pkg;
    uint32_t		 count;
    int			 i;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(sc->cpu_handle, "_CST", NULL, &buf);
    if (ACPI_FAILURE(status))
	return_VALUE (ENXIO);

    /* _CST is a package with a count and at least one Cx package. */
    top = (ACPI_OBJECT *)buf.Pointer;
    if (!ACPI_PKG_VALID(top, 2) || acpi_PkgInt32(top, 0, &count) != 0) {
	device_printf(sc->cpu_dev, "Invalid _CST package\n");
	AcpiOsFree(buf.Pointer);
	return_VALUE (ENXIO);
    }
    if (count != top->Package.Count - 1) {
	device_printf(sc->cpu_dev, "Invalid _CST state count (%d != %d)\n",
		      count, top->Package.Count - 1);
	count = top->Package.Count - 1;
    }
    if (count > MAX_CX_STATES) {
	device_printf(sc->cpu_dev, "_CST has too many states (%d)\n", count);
	count = MAX_CX_STATES;
    }
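
    /*
     * For reference, a _CST return package has this shape (values here
     * are illustrative only); each Cx sub-package is { control register,
     * type, transition latency (usec), power (mW) }:
     *
     *	Package () {
     *	    2,						// state count
     *	    Package () { ResourceTemplate () {...}, 2, 17, 500 },
     *	    Package () { ResourceTemplate () {...}, 3, 85, 250 }
     *	}
     */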
    /* Set up all valid states. */
    sc->cpu_cx_count = 0;
    cx_ptr = sc->cpu_cx_states;
    for (i = 0; i < count; i++) {
	pkg = &top->Package.Elements[i + 1];
	if (!ACPI_PKG_VALID(pkg, 4) ||
	    acpi_PkgInt32(pkg, 1, &cx_ptr->type) != 0 ||
	    acpi_PkgInt32(pkg, 2, &cx_ptr->trans_lat) != 0 ||
	    acpi_PkgInt32(pkg, 3, &cx_ptr->power) != 0) {

	    device_printf(sc->cpu_dev, "Skipping invalid Cx state package\n");
	    continue;
	}
	/* Validate the state to see if we should use it. */
	switch (cx_ptr->type) {
	case ACPI_STATE_C1:
	    cpu_non_c3 = i;
	    cx_ptr++;
	    sc->cpu_cx_count++;
	    continue;
	case ACPI_STATE_C2:
	    if (cx_ptr->trans_lat > 100) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				 "acpi_cpu%d: C2[%d] not available.\n",
				 device_get_unit(sc->cpu_dev), i));
		continue;
	    }
	    cpu_non_c3 = i;
	    break;
	case ACPI_STATE_C3:
	default:
	    if (cx_ptr->trans_lat > 1000 ||
		(cpu_quirks & CPU_QUIRK_NO_C3) != 0) {

		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				 "acpi_cpu%d: C3[%d] not available.\n",
				 device_get_unit(sc->cpu_dev), i));
		continue;
	    }
	    break;
	}
	/* Free up any previous register. */
	if (cx_ptr->p_lvlx != NULL) {
	    bus_release_resource(sc->cpu_dev, 0, 0, cx_ptr->p_lvlx);
	    cx_ptr->p_lvlx = NULL;
	}

	/* Allocate the control register for C2 or C3. */
	acpi_PkgGas(sc->cpu_dev, pkg, 0, &cpu_rid, &cx_ptr->p_lvlx);
	if (cx_ptr->p_lvlx != NULL) {
	    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			     "acpi_cpu%d: Got C%d - %d latency\n",
			     device_get_unit(sc->cpu_dev), cx_ptr->type,
			     cx_ptr->trans_lat));
	    cx_ptr++;
	    sc->cpu_cx_count++;
	}
    }
    AcpiOsFree(buf.Pointer);

    return_VALUE (0);
}
/*
 * Call this *after* all CPUs have been attached.
 */
static void
acpi_cpu_startup(void *arg)
{
    struct acpi_cpu_softc *sc;
    int count, i;

    /* Get set of CPU devices */
    devclass_get_devices(acpi_cpu_devclass, &cpu_devices, &cpu_ndevices);

    /*
     * Make sure all the processors' Cx counts match.  We should probably
     * also check the contents of each.  However, no known systems have
     * non-matching Cx counts so we'll deal with this later.
     */
    count = MAX_CX_STATES;
    for (i = 0; i < cpu_ndevices; i++) {
	sc = device_get_softc(cpu_devices[i]);
	count = min(sc->cpu_cx_count, count);
    }
    cpu_cx_count = count;

    /* Perform throttling and Cx final initialization. */
    sc = device_get_softc(cpu_devices[0]);
    if (sc->cpu_p_cnt != NULL)
	acpi_cpu_startup_throttling();
    if (cpu_cx_count > 0)
	acpi_cpu_startup_cx();
}
/*
 * Takes the ACPI lock to avoid fighting anyone over the SMI command
 * port.
 */
static void
acpi_cpu_startup_throttling(void)
{
    /* Initialise throttling states */
    cpu_throttle_max = CPU_MAX_SPEED;
    cpu_throttle_state = CPU_MAX_SPEED;

    SYSCTL_ADD_INT(&acpi_cpu_sysctl_ctx,
		   SYSCTL_CHILDREN(acpi_cpu_sysctl_tree),
		   OID_AUTO, "throttle_max", CTLFLAG_RD,
		   &cpu_throttle_max, 0, "maximum CPU speed");
    SYSCTL_ADD_PROC(&acpi_cpu_sysctl_ctx,
		    SYSCTL_CHILDREN(acpi_cpu_sysctl_tree),
		    OID_AUTO, "throttle_state",
		    CTLTYPE_INT | CTLFLAG_RW, &cpu_throttle_state,
		    0, acpi_cpu_throttle_sysctl, "I", "current CPU speed");

    /* If ACPI 2.0+, signal platform that we are taking over throttling. */
    ACPI_LOCK;
    if (cpu_pstate_cnt != 0)
	AcpiOsWritePort(cpu_smi_cmd, cpu_pstate_cnt, 8);

    /* Set initial speed to maximum. */
    acpi_cpu_throttle_set(cpu_throttle_max);
    ACPI_UNLOCK;

    printf("acpi_cpu: throttling enabled, %d steps (100%% to %d.%d%%), "
	   "currently %d.%d%%\n", CPU_MAX_SPEED, CPU_SPEED_PRINTABLE(1),
	   CPU_SPEED_PRINTABLE(cpu_throttle_state));
}
static void
acpi_cpu_startup_cx(void)
{
    struct acpi_cpu_softc *sc;
    struct sbuf		 sb;
    int			 i;

    sc = device_get_softc(cpu_devices[0]);
    sbuf_new(&sb, cpu_cx_supported, sizeof(cpu_cx_supported), SBUF_FIXEDLEN);
    for (i = 0; i < cpu_cx_count; i++) {
	sbuf_printf(&sb, "C%d/%d ", sc->cpu_cx_states[i].type,
		    sc->cpu_cx_states[i].trans_lat);
    }
    sbuf_trim(&sb);
    sbuf_finish(&sb);
    SYSCTL_ADD_STRING(&acpi_cpu_sysctl_ctx,
		      SYSCTL_CHILDREN(acpi_cpu_sysctl_tree),
		      OID_AUTO, "cx_supported", CTLFLAG_RD, cpu_cx_supported,
		      0, "Cx/microsecond values for supported Cx states");
    SYSCTL_ADD_PROC(&acpi_cpu_sysctl_ctx,
		    SYSCTL_CHILDREN(acpi_cpu_sysctl_tree),
		    OID_AUTO, "cx_lowest", CTLTYPE_INT | CTLFLAG_RW,
		    NULL, 0, acpi_cpu_cx_lowest_sysctl, "I",
		    "lowest Cx sleep state to use");
    SYSCTL_ADD_PROC(&acpi_cpu_sysctl_ctx,
		    SYSCTL_CHILDREN(acpi_cpu_sysctl_tree),
		    OID_AUTO, "cx_history", CTLTYPE_STRING | CTLFLAG_RD,
		    NULL, 0, acpi_cpu_history_sysctl, "A", "");

    /* Signal platform that we can handle _CST notification. */
    if (cpu_cst_cnt != 0) {
	ACPI_LOCK;
	AcpiOsWritePort(cpu_smi_cmd, cpu_cst_cnt, 8);
	ACPI_UNLOCK;
    }

    /* Take over idling from cpu_idle_default(). */
    cpu_cx_next = cpu_cx_lowest;
    /* cpu_idle_hook = acpi_cpu_idle; */
}
/*
 * Set CPUs to the new state.
 *
 * Must be called with the ACPI lock held.
 */
static void
acpi_cpu_throttle_set(uint32_t speed)
{
    struct acpi_cpu_softc *sc;
    uint32_t		 p_cnt, clk_val;
    int			 i;

    /* Iterate over processors */
    for (i = 0; i < cpu_ndevices; i++) {
	sc = device_get_softc(cpu_devices[i]);
	if (sc->cpu_p_cnt == NULL)
	    continue;

	/* Get the current P_CNT value and disable throttling */
	p_cnt = CPU_GET_REG(sc->cpu_p_cnt, 4);
	p_cnt &= ~CPU_P_CNT_THT_EN;
	CPU_SET_REG(sc->cpu_p_cnt, 4, p_cnt);

	/* If we're at maximum speed, that's all */
	if (speed < CPU_MAX_SPEED) {
	    /* Mask the old CLK_VAL off and or-in the new value */
	    clk_val = (CPU_MAX_SPEED - 1) << cpu_duty_offset;
	    p_cnt &= ~clk_val;
	    p_cnt |= (speed << cpu_duty_offset);

	    /* Write the new P_CNT value and then enable throttling */
	    CPU_SET_REG(sc->cpu_p_cnt, 4, p_cnt);
	    p_cnt |= CPU_P_CNT_THT_EN;
	    CPU_SET_REG(sc->cpu_p_cnt, 4, p_cnt);
	}
	ACPI_VPRINT(sc->cpu_dev, acpi_device_get_parent_softc(sc->cpu_dev),
		    "set speed to %d.%d%%\n", CPU_SPEED_PRINTABLE(speed));
    }
    cpu_throttle_state = speed;
}
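
/*
 * Worked example of the P_CNT update above, assuming cpu_duty_offset = 1
 * and cpu_duty_width = 3: CPU_MAX_SPEED is 8 and the CLK_VAL mask is
 * (8 - 1) << 1 = 0x0e.  Setting speed 4 clears bits 1-3 of p_cnt, ORs in
 * 4 << 1 = 0x08, writes P_CNT with THT_EN still clear, and only then sets
 * THT_EN (bit 4) with a second write so throttling is never enabled while
 * a stale duty cycle value is in the register.
 */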
/*
 * Idle the CPU in the lowest state possible.
 * This function is called with interrupts disabled.
 */
static void
acpi_cpu_idle(void)
{
    struct acpi_cpu_softc *sc;
    struct acpi_cx	*cx_next;
    uint32_t		 start_time, end_time;
    int			 bm_active, i, asleep;

    /* If disabled, return immediately. */
    if (cpu_cx_count == 0) {
	ACPI_ENABLE_IRQS();
	return;
    }

    /*
     * Look up our CPU id to get our softc.  If it's NULL, we'll use C1
     * since there is no ACPI processor object for this CPU.  This occurs
     * for logical CPUs in the HTT case.
     */
    sc = cpu_softc[mdcpu->mi.gd_cpuid];
    if (sc == NULL) {
	acpi_cpu_c1();
	return;
    }

    /* Record that a CPU is in the idle function. */
    atomic_add_int(&cpu_idle_busy, 1);
    /*
     * Check for bus master activity.  If there was activity, clear
     * the bit and use the lowest non-C3 state.  Note that the USB
     * driver polling for new devices keeps this bit set all the
     * time if USB is enabled.
     */
    AcpiGetRegister(ACPI_BITREG_BUS_MASTER_STATUS, &bm_active,
		    ACPI_MTX_DO_NOT_LOCK);
    if (bm_active != 0) {
	AcpiSetRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1,
			ACPI_MTX_DO_NOT_LOCK);
	cpu_cx_next = min(cpu_cx_next, cpu_non_c3);
    }
    /* Perform the actual sleep based on the Cx-specific semantics. */
    cx_next = &sc->cpu_cx_states[cpu_cx_next];
    switch (cx_next->type) {
    case ACPI_STATE_C0:
	panic("acpi_cpu_idle: attempting to sleep in C0");
	/* NOTREACHED */
    case ACPI_STATE_C1:
	/* Execute HLT (or equivalent) and wait for an interrupt. */
	acpi_cpu_c1();

	/*
	 * We can't calculate the time spent in C1 since the place we
	 * wake up is an ISR.  Use a constant time of 1 ms.
	 */
	start_time = 0;
	end_time = 4000;
	break;
    case ACPI_STATE_C2:
	/*
	 * Read from P_LVLx to enter C2, checking time spent asleep.
	 * Use the ACPI timer for measuring sleep time.  Since we need to
	 * get the time very close to the CPU start/stop clock logic, this
	 * is the only reliable time source.
	 */
	AcpiHwLowLevelRead(32, &start_time, &AcpiGbl_FADT->XPmTmrBlk);
	CPU_GET_REG(cx_next->p_lvlx, 1);

	/*
	 * Read the end time twice.  Since it may take an arbitrary time
	 * to enter the idle state, the first read may be executed before
	 * the processor has stopped.  Doing it again provides enough
	 * margin that we are certain to have a correct value.
	 */
	AcpiHwLowLevelRead(32, &end_time, &AcpiGbl_FADT->XPmTmrBlk);
	AcpiHwLowLevelRead(32, &end_time, &AcpiGbl_FADT->XPmTmrBlk);
	break;
    case ACPI_STATE_C3:
    default:
	/* Disable bus master arbitration and enable bus master wakeup. */
	AcpiSetRegister(ACPI_BITREG_ARB_DISABLE, 1, ACPI_MTX_DO_NOT_LOCK);
	AcpiSetRegister(ACPI_BITREG_BUS_MASTER_RLD, 1, ACPI_MTX_DO_NOT_LOCK);

	/* Read from P_LVLx to enter C3, checking time spent asleep. */
	AcpiHwLowLevelRead(32, &start_time, &AcpiGbl_FADT->XPmTmrBlk);
	CPU_GET_REG(cx_next->p_lvlx, 1);

	/* Read the end time twice.  See comment for C2 above. */
	AcpiHwLowLevelRead(32, &end_time, &AcpiGbl_FADT->XPmTmrBlk);
	AcpiHwLowLevelRead(32, &end_time, &AcpiGbl_FADT->XPmTmrBlk);

	/* Enable bus master arbitration and disable bus master wakeup. */
	AcpiSetRegister(ACPI_BITREG_ARB_DISABLE, 0, ACPI_MTX_DO_NOT_LOCK);
	AcpiSetRegister(ACPI_BITREG_BUS_MASTER_RLD, 0, ACPI_MTX_DO_NOT_LOCK);
	break;
    }
    /* Find the actual time asleep in microseconds, minus overhead. */
    acpi_pm_ticksub(&end_time, &start_time);
    asleep = PM_USEC(end_time) - cx_next->trans_lat;

    /* Record statistics */
    if (asleep < cx_next->trans_lat)
	cpu_cx_stats[cpu_cx_next].short_slp++;
    else
	cpu_cx_stats[cpu_cx_next].long_slp++;

    /*
     * If we slept 100 us or more, use the lowest Cx state.
     * Otherwise, find the lowest state that has a latency less than
     * or equal to the length of our last sleep.
     */
    if (asleep >= 100)
	cpu_cx_next = cpu_cx_lowest;
    else {
	for (i = cpu_cx_lowest; i >= 0; i--) {
	    if (sc->cpu_cx_states[i].trans_lat <= asleep) {
		cpu_cx_next = i;
		break;
	    }
	}
    }

    /* Decrement reference count checked by acpi_cpu_shutdown(). */
    atomic_subtract_int(&cpu_idle_busy, 1);
}
/* Put the CPU in C1 in a machine-dependent way. */
static void
acpi_cpu_c1(void)
{
#ifdef __ia64__
    ia64_call_pal_static(PAL_HALT_LIGHT, 0, 0, 0);
#else
    __asm __volatile("sti; hlt");
#endif
}
/* Find the difference between two PM tick counts. */
static void
acpi_pm_ticksub(uint32_t *end, const uint32_t *start)
{
    if (*end >= *start)
	*end = *end - *start;
    else if (AcpiGbl_FADT->TmrValExt == 0)
	*end = (((0x00FFFFFF - *start) + *end + 1) & 0x00FFFFFF);
    else
	*end = ((0xFFFFFFFF - *start) + *end + 1);
}
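
/*
 * Worked example of the 24-bit wraparound case: with start = 0x00fffff0
 * and end = 0x00000010 the timer has rolled over, so
 * ((0x00ffffff - 0x00fffff0) + 0x10 + 1) & 0x00ffffff = 0x20 ticks,
 * which PM_USEC() then converts to microseconds.
 */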
/*
 * Re-evaluate the _PSS and _CST objects when we are notified that they
 * changed.
 *
 * XXX Re-evaluation disabled until locking is done.
 */
static void
acpi_cpu_notify(ACPI_HANDLE h, UINT32 notify, void *context)
{
    struct acpi_cpu_softc *sc = (struct acpi_cpu_softc *)context;

    switch (notify) {
    case ACPI_CPU_NOTIFY_PERF_STATES:
	device_printf(sc->cpu_dev, "Performance states changed\n");
	/* acpi_cpu_px_available(sc); */
	break;
    case ACPI_CPU_NOTIFY_CX_STATES:
	device_printf(sc->cpu_dev, "Cx states changed\n");
	/* acpi_cpu_cx_cst(sc); */
	break;
    default:
	device_printf(sc->cpu_dev, "Unknown notify %#x\n", notify);
	break;
    }
}
static int
acpi_cpu_quirks(struct acpi_cpu_softc *sc)
{
    device_t acpi_dev;

    /*
     * C3 is not supported on multiple CPUs since this would require
     * flushing all caches which is currently too expensive.
     */
    if (ncpus > 1)
	cpu_quirks |= CPU_QUIRK_NO_C3;

    /* Look for various quirks of the PIIX4 part. */
    acpi_dev = pci_find_device(PCI_VENDOR_INTEL, PCI_DEVICE_82371AB_3);
    if (acpi_dev != NULL) {
	switch (pci_get_revid(acpi_dev)) {
	/*
	 * Disable throttling control on PIIX4 A and B-step.
	 * See specification changes #13 ("Manual Throttle Duty Cycle")
	 * and #14 ("Enabling and Disabling Manual Throttle"), plus
	 * erratum #5 ("STPCLK# Deassertion Time") from the January
	 * 2002 PIIX4 specification update.  Note that few (if any)
	 * mobile systems ever used this part.
	 */
	case PCI_REVISION_A_STEP:
	case PCI_REVISION_B_STEP:
	    cpu_quirks |= CPU_QUIRK_NO_THROTTLE;
	    /* FALLTHROUGH */
	/*
	 * Disable C3 support for all PIIX4 chipsets.  Some of these parts
	 * do not report the BMIDE status to the BM status register and
	 * others have a livelock bug if Type-F DMA is enabled.  Linux
	 * works around the BMIDE bug by reading the BM status directly
	 * but we take the simpler approach of disabling C3 for these
	 * parts.
	 *
	 * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
	 * Livelock") from the January 2002 PIIX4 specification update.
	 * Applies to all PIIX4 models.
	 */
	case PCI_REVISION_4E:
	case PCI_REVISION_4M:
	    cpu_quirks |= CPU_QUIRK_NO_C3;
	    break;
	default:
	    break;
	}
    }

    return (0);
}
/* Handle changes in the CPU throttling setting. */
static int
acpi_cpu_throttle_sysctl(SYSCTL_HANDLER_ARGS)
{
    uint32_t	*argp;
    uint32_t	 arg;
    int		 error;

    argp = (uint32_t *)oidp->oid_arg1;
    arg = *argp;
    error = sysctl_handle_int(oidp, &arg, 0, req);

    /* Error or no new value */
    if (error != 0 || req->newptr == NULL)
	return (error);
    if (arg < 1 || arg > cpu_throttle_max)
	return (EINVAL);

    /* If throttling changed, notify the BIOS of the new rate. */
    ACPI_LOCK;
    if (*argp != arg) {
	acpi_cpu_throttle_set(arg);
	*argp = arg;
    }
    ACPI_UNLOCK;

    return (0);
}
static int
acpi_cpu_history_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct sbuf	 sb;
    char	 buf[128];
    int		 i;

    sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
    for (i = 0; i < cpu_cx_count; i++) {
	sbuf_printf(&sb, "%u/%u ", cpu_cx_stats[i].long_slp,
		    cpu_cx_stats[i].short_slp);
    }
    sbuf_trim(&sb);
    sbuf_finish(&sb);
    sysctl_handle_string(oidp, sbuf_data(&sb), 0, req);
    sbuf_delete(&sb);

    return (0);
}
static int
acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cpu_softc *sc;
    int		 val, error, i;

    sc = device_get_softc(cpu_devices[0]);
    val = cpu_cx_lowest;
    error = sysctl_handle_int(oidp, &val, 0, req);
    if (error != 0 || req->newptr == NULL)
	return (error);
    if (val < 0 || val > cpu_cx_count - 1)
	return (EINVAL);

    /* Use the new value for the next idle slice. */
    cpu_cx_lowest = val;
    cpu_cx_next = val;

    /* If not disabling, cache the new lowest non-C3 state. */
    cpu_non_c3 = 0;
    for (i = cpu_cx_lowest; i >= 0; i--) {
	if (sc->cpu_cx_states[i].type < ACPI_STATE_C3) {
	    cpu_non_c3 = i;
	    break;
	}
    }

    /* Reset the statistics counters. */
    memset(cpu_cx_stats, 0, sizeof(cpu_cx_stats));

    return (0);
}