/*-
 * Copyright (c) 2003-2005 Nate Lawson (SDG)
 * Copyright (c) 2001 Michael Smith
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/acpica/acpi_cpu.c,v 1.72 2008/04/12 12:06:00 rpaulo Exp $
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/globaldata.h>
#include <sys/power.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/thread2.h>
#include <sys/serialize.h>
#include <sys/msgport2.h>

#include <bus/pci/pcivar.h>
#include <machine/atomic.h>
#include <machine/globaldata.h>
#include <machine/md_var.h>
#include <machine/smp.h>

#include <net/netisr2.h>
#include <net/netmsg2.h>
#include <net/if_var.h>

/*
 * Support for ACPI Processor devices, including C[1-3] sleep states.
 */

/* Hooks for the ACPI CA debugging infrastructure */
#define _COMPONENT	ACPI_PROCESSOR
ACPI_MODULE_NAME("PROCESSOR")

struct netmsg_acpi_cst {
    struct netmsg_base	base;
    struct acpi_cpu_softc *sc;
    int			val;
};

struct acpi_cx {
    struct resource	*p_lvlx;	/* Register to read to enter state. */
    int			 rid;		/* rid of p_lvlx */
    uint32_t		 type;		/* C1-3 (C4 and up treated as C3). */
    uint32_t		 trans_lat;	/* Transition latency (usec). */
    uint32_t		 power;		/* Power consumed (mW). */
    int			 res_type;	/* Resource type for p_lvlx. */
};

#define MAX_CX_STATES	8

struct acpi_cpu_softc {
    device_t		 cpu_dev;
    struct acpi_cpux_softc *cpu_parent;
    ACPI_HANDLE		 cpu_handle;
    int			 cpu_id;
    uint32_t		 cst_flags;	/* ACPI_CST_FLAG_ */
    uint32_t		 cpu_p_blk;	/* ACPI P_BLK location */
    uint32_t		 cpu_p_blk_len;	/* P_BLK length (must be 6). */
    struct acpi_cx	 cpu_cx_states[MAX_CX_STATES];
    int			 cpu_cx_count;	/* Number of valid Cx states. */
    int			 cpu_prev_sleep;/* Last idle sleep duration. */
    int			 cpu_non_c3;	/* Index of lowest non-C3 state. */
    u_long		 cpu_cx_stats[MAX_CX_STATES];/* Cx usage history. */
    /* Values for sysctl. */
    int			 cpu_cx_lowest;	/* Current Cx lowest */
    int			 cpu_cx_lowest_req; /* Requested Cx lowest */
    char		 cpu_cx_supported[64];
};

#define ACPI_CST_FLAG_PROBING	0x1

struct acpi_cpu_device {
    struct resource_list ad_rl;
};

#define CPU_GET_REG(reg, width)						\
    (bus_space_read_ ## width(rman_get_bustag((reg)),			\
		      rman_get_bushandle((reg)), 0))
#define CPU_SET_REG(reg, width, val)					\
    (bus_space_write_ ## width(rman_get_bustag((reg)),			\
		       rman_get_bushandle((reg)), 0, (val)))

#define PM_USEC(x)	((x) >> 2)	/* ~4 clocks per usec (3.57955 MHz) */
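
/*
 * Example: the ACPI PM timer runs at 3.579545 MHz, i.e. ~3.58 ticks per
 * microsecond, so ">> 2" is a cheap ticks-to-microseconds conversion
 * that slightly underestimates: 3580 ticks is ~1000 usec, but
 * PM_USEC(3580) == 895.  That is close enough for the sleep history
 * heuristic in acpi_cpu_idle().
 */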

#define ACPI_NOTIFY_CX_STATES	0x81	/* _CST changed. */

#define CPU_QUIRK_NO_C3		(1<<0)	/* C3-type states are not usable. */
#define CPU_QUIRK_NO_BM_CTRL	(1<<2)	/* No bus mastering control. */

#define PCI_VENDOR_INTEL	0x8086
#define PCI_DEVICE_82371AB_3	0x7113	/* PIIX4 chipset for quirks. */
#define PCI_REVISION_A_STEP	0
#define PCI_REVISION_B_STEP	1
#define PCI_REVISION_4E		2
#define PCI_REVISION_4M		3
#define PIIX4_DEVACTB_REG	0x58
#define PIIX4_BRLD_EN_IRQ0	(1<<0)
#define PIIX4_BRLD_EN_IRQ	(1<<1)
#define PIIX4_BRLD_EN_IRQ8	(1<<5)
#define PIIX4_STOP_BREAK_MASK	(PIIX4_BRLD_EN_IRQ0 | PIIX4_BRLD_EN_IRQ | PIIX4_BRLD_EN_IRQ8)
#define PIIX4_PCNTRL_BST_EN	(1<<10)
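
/*
 * DEVACTB is a 32-bit PCI config register at offset 0x58 on the PIIX4;
 * acpi_cpu_quirks() sets the three BRLD_EN bits above so that IRQ0,
 * IRQ8 and the other IRQ lines all generate a "Stop Break" event that
 * brings the CPU back out of C2.
 */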

/* Platform hardware resource information. */
static uint32_t		 cpu_smi_cmd;	/* Value to write to SMI_CMD. */
static uint8_t		 cpu_cst_cnt;	/* Indicate we are _CST aware. */
static int		 cpu_quirks;	/* Indicate any hardware bugs. */

static int		 cpu_disable_idle; /* Disable entry to idle function */
static int		 cpu_cx_count;	/* Number of valid Cx states */

/* Values for sysctl. */
static int		 cpu_cx_generic;
static int		 cpu_cx_lowest;	/* Current Cx lowest */
static int		 cpu_cx_lowest_req; /* Requested Cx lowest */
static struct lwkt_serialize cpu_cx_slize = LWKT_SERIALIZE_INITIALIZER;
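
/*
 * cpu_cx_slize serializes the Cx sysctl handlers and the _CST change
 * notification path; per-CPU updates are additionally dispatched to
 * the owning CPU's netisr thread (see acpi_cpu_cx_cst_dispatch() and
 * acpi_cpu_set_cx_lowest()).
 */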

/* C3 state transition */
static int		 cpu_c3_ncpus;

static device_t		*cpu_devices;
static int		 cpu_ndevices;
static struct acpi_cpu_softc **cpu_softc;

static int	acpi_cpu_cst_probe(device_t dev);
static int	acpi_cpu_cst_attach(device_t dev);
static int	acpi_cpu_cst_suspend(device_t dev);
static int	acpi_cpu_cst_resume(device_t dev);
static struct resource_list *acpi_cpu_cst_get_rlist(device_t dev,
		    device_t child);
static device_t	acpi_cpu_cst_add_child(device_t bus, device_t parent,
		    int order, const char *name, int unit);
static int	acpi_cpu_cst_read_ivar(device_t dev, device_t child,
		    int index, uintptr_t *result);
static int	acpi_cpu_cst_shutdown(device_t dev);
static void	acpi_cpu_cx_probe(struct acpi_cpu_softc *sc);
static void	acpi_cpu_generic_cx_probe(struct acpi_cpu_softc *sc);
static int	acpi_cpu_cx_cst(struct acpi_cpu_softc *sc);
static int	acpi_cpu_cx_cst_dispatch(struct acpi_cpu_softc *sc);
static void	acpi_cpu_startup(void *arg);
static void	acpi_cpu_startup_cx(struct acpi_cpu_softc *sc);
static void	acpi_cpu_cx_list(struct acpi_cpu_softc *sc);
static void	acpi_cpu_idle(void);
static void	acpi_cpu_cst_notify(device_t);
static int	acpi_cpu_quirks(void);
static int	acpi_cpu_usage_sysctl(SYSCTL_HANDLER_ARGS);
static int	acpi_cpu_set_cx_lowest(struct acpi_cpu_softc *, int);
static int	acpi_cpu_set_cx_lowest_oncpu(struct acpi_cpu_softc *, int);
static int	acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS);
static int	acpi_cpu_cx_lowest_use_sysctl(SYSCTL_HANDLER_ARGS);
static int	acpi_cpu_global_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS);
static int	acpi_cpu_global_cx_lowest_use_sysctl(SYSCTL_HANDLER_ARGS);
static void	acpi_cpu_cx_non_c3(struct acpi_cpu_softc *sc);
static void	acpi_cpu_global_cx_count(void);

static void	acpi_cpu_c1(void);	/* XXX */

static device_method_t acpi_cpu_cst_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe,	acpi_cpu_cst_probe),
    DEVMETHOD(device_attach,	acpi_cpu_cst_attach),
    DEVMETHOD(device_detach,	bus_generic_detach),
    DEVMETHOD(device_shutdown,	acpi_cpu_cst_shutdown),
    DEVMETHOD(device_suspend,	acpi_cpu_cst_suspend),
    DEVMETHOD(device_resume,	acpi_cpu_cst_resume),

    /* Bus interface */
    DEVMETHOD(bus_add_child,	acpi_cpu_cst_add_child),
    DEVMETHOD(bus_read_ivar,	acpi_cpu_cst_read_ivar),
    DEVMETHOD(bus_get_resource_list, acpi_cpu_cst_get_rlist),
    DEVMETHOD(bus_get_resource,	bus_generic_rl_get_resource),
    DEVMETHOD(bus_set_resource,	bus_generic_rl_set_resource),
    DEVMETHOD(bus_alloc_resource, bus_generic_rl_alloc_resource),
    DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource),
    DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
    DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
    DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
    DEVMETHOD(bus_setup_intr,	bus_generic_setup_intr),
    DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),

    DEVMETHOD_END
};

static driver_t acpi_cpu_cst_driver = {
    "cpu_cst",
    acpi_cpu_cst_methods,
    sizeof(struct acpi_cpu_softc),
};

static devclass_t acpi_cpu_cst_devclass;
DRIVER_MODULE(cpu_cst, cpu, acpi_cpu_cst_driver, acpi_cpu_cst_devclass, NULL, NULL);
MODULE_DEPEND(cpu_cst, acpi, 1, 1, 1);
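
/*
 * The second DRIVER_MODULE() argument names the parent bus, so a
 * cpu_cst instance is probed under every device on the acpi "cpu"
 * bus, i.e. one C-state driver per processor object.
 */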

static int
acpi_cpu_cst_probe(device_t dev)
{
    int cpu_id;

    if (acpi_disabled("cpu_cst") || acpi_get_type(dev) != ACPI_TYPE_PROCESSOR)
	return (ENXIO);

    cpu_id = acpi_get_magic(dev);

    if (cpu_softc == NULL)
	cpu_softc = kmalloc(sizeof(struct acpi_cpu_softc *) *
	    SMP_MAXCPU, M_TEMP /* XXX */, M_INTWAIT | M_ZERO);

    /*
     * Check if we already probed this processor.  We scan the bus twice
     * so it's possible we've already seen this one.
     */
    if (cpu_softc[cpu_id] != NULL) {
	device_printf(dev, "CPU%d cstate already exists\n", cpu_id);
	return (ENXIO);
    }

    /* Mark this processor as in-use and save our derived id for attach. */
    cpu_softc[cpu_id] = (void *)1;
    device_set_desc(dev, "ACPI CPU C-State");
    return (0);
}

static int
acpi_cpu_cst_attach(device_t dev)
{
    ACPI_BUFFER		 buf;
    ACPI_OBJECT		*obj;
    struct acpi_cpu_softc *sc;
    ACPI_STATUS		 status;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    sc = device_get_softc(dev);
    sc->cpu_dev = dev;
    sc->cpu_parent = device_get_softc(device_get_parent(dev));
    sc->cpu_handle = acpi_get_handle(dev);
    sc->cpu_id = acpi_get_magic(dev);
    cpu_softc[sc->cpu_id] = sc;
    cpu_smi_cmd = AcpiGbl_FADT.SmiCommand;
    cpu_cst_cnt = AcpiGbl_FADT.CstControl;

    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(sc->cpu_handle, NULL, NULL, &buf);
    if (ACPI_FAILURE(status)) {
	device_printf(dev, "attach failed to get Processor obj - %s\n",
		      AcpiFormatException(status));
	return (ENXIO);
    }
    obj = (ACPI_OBJECT *)buf.Pointer;
    sc->cpu_p_blk = obj->Processor.PblkAddress;
    sc->cpu_p_blk_len = obj->Processor.PblkLength;
    AcpiOsFree(obj);
    ACPI_DEBUG_PRINT((ACPI_DB_INFO, "acpi_cpu%d: P_BLK at %#x/%d\n",
		     device_get_unit(dev), sc->cpu_p_blk, sc->cpu_p_blk_len));

    /*
     * If this is the first cpu we attach, create and initialize the generic
     * resources that will be used by all acpi cpu devices.
     */
    if (device_get_unit(dev) == 0) {
	/* Assume we won't be using generic Cx mode by default */
	cpu_cx_generic = FALSE;

	/* Queue post cpu-probing task handler */
	AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_cpu_startup, NULL);
    }

    /* Probe for Cx state support. */
    acpi_cpu_cx_probe(sc);

    /* Finally, call identify and probe/attach for child devices. */
    bus_generic_probe(dev);
    bus_generic_attach(dev);

    return (0);
}

/*
 * Disable any entry to the idle function during suspend and re-enable it
 * during resume.
 */
static int
acpi_cpu_cst_suspend(device_t dev)
{
    int error;

    error = bus_generic_suspend(dev);
    if (error)
	return (error);
    cpu_disable_idle = TRUE;
    return (0);
}

static int
acpi_cpu_cst_resume(device_t dev)
{
    cpu_disable_idle = FALSE;
    return (bus_generic_resume(dev));
}

static struct resource_list *
acpi_cpu_cst_get_rlist(device_t dev, device_t child)
{
    struct acpi_cpu_device *ad;

    ad = device_get_ivars(child);
    if (ad == NULL)
	return (NULL);
    return (&ad->ad_rl);
}

static device_t
acpi_cpu_cst_add_child(device_t bus, device_t parent, int order,
		       const char *name, int unit)
{
    struct acpi_cpu_device *ad;
    device_t child;

    if ((ad = kmalloc(sizeof(*ad), M_TEMP, M_NOWAIT | M_ZERO)) == NULL)
	return (NULL);

    resource_list_init(&ad->ad_rl);

    child = device_add_child_ordered(parent, order, name, unit);
    if (child != NULL)
	device_set_ivars(child, ad);
    else
	kfree(ad, M_TEMP);
    return (child);
}

static int
acpi_cpu_cst_read_ivar(device_t dev, device_t child, int index,
		       uintptr_t *result)
{
    struct acpi_cpu_softc *sc;

    sc = device_get_softc(dev);
    switch (index) {
    case ACPI_IVAR_HANDLE:
	*result = (uintptr_t)sc->cpu_handle;
	break;
    case CPU_IVAR_PCPU:
	*result = (uintptr_t)sc->cpu_pcpu;
	break;
    default:
	return (ENOENT);
    }
    return (0);
}

static int
acpi_cpu_cst_shutdown(device_t dev)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* Allow children to shutdown first. */
    bus_generic_shutdown(dev);

    /*
     * Disable any entry to the idle function.  There is a small race where
     * an idle thread has passed this check but not gone to sleep.  This
     * is ok since device_shutdown() does not free the softc, otherwise
     * we'd have to be sure all threads were evicted before returning.
     */
    cpu_disable_idle = TRUE;

    return (0);
}

static void
acpi_cpu_cx_probe(struct acpi_cpu_softc *sc)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* Use initial sleep value of 1 sec. to start with lowest idle state. */
    sc->cpu_prev_sleep = 1000000;
    sc->cpu_cx_lowest = 0;
    sc->cpu_cx_lowest_req = 0;

    /*
     * Check for the ACPI 2.0 _CST sleep states object.  If we can't find
     * any, we'll revert to the generic FADT/P_BLK Cx control method, which
     * is handled by acpi_cpu_startup.  Probing for generic Cx states is
     * deferred until all cpus in the system have been probed, since we
     * may already have found cpus with valid _CST packages.
     */
    if (!cpu_cx_generic && acpi_cpu_cx_cst(sc) != 0) {
	/*
	 * We were unable to find a _CST package for this cpu or there
	 * was an error parsing it.  Switch back to generic mode.
	 */
	cpu_cx_generic = TRUE;
	if (bootverbose)
	    device_printf(sc->cpu_dev, "switching to generic Cx mode\n");
    }

    /*
     * TODO: _CSD Package should be checked here.
     */
}

static void
acpi_cpu_generic_cx_probe(struct acpi_cpu_softc *sc)
{
    ACPI_GENERIC_ADDRESS gas;
    struct acpi_cx	*cx_ptr;

    sc->cpu_cx_count = 0;
    cx_ptr = sc->cpu_cx_states;

    /* Use initial sleep value of 1 sec. to start with lowest idle state. */
    sc->cpu_prev_sleep = 1000000;

    /* C1 has been required since just after ACPI 1.0 */
    cx_ptr->type = ACPI_STATE_C1;
    cx_ptr->trans_lat = 0;
    cx_ptr++;
    sc->cpu_cx_count++;

    /*
     * The spec says P_BLK must be 6 bytes long.  However, some systems
     * use it to indicate a fractional set of features present so we
     * take 5 as C2.  Some may also have a value of 7 to indicate
     * another C3 but most use _CST for this (as required) and having
     * "only" C1-C3 is not a hardship.
     */
    if (sc->cpu_p_blk_len < 5)
	return;

    /* Validate and allocate resources for C2 (P_LVL2). */
    gas.SpaceId = ACPI_ADR_SPACE_SYSTEM_IO;
    gas.BitWidth = 8;
    if (AcpiGbl_FADT.C2Latency <= 100) {
	gas.Address = sc->cpu_p_blk + 4;

	cx_ptr->rid = sc->cpu_parent->cpux_next_rid;
	acpi_bus_alloc_gas(sc->cpu_dev, &cx_ptr->type, &cx_ptr->rid, &gas,
			   &cx_ptr->p_lvlx, RF_SHAREABLE);
	if (cx_ptr->p_lvlx != NULL) {
	    sc->cpu_parent->cpux_next_rid++;
	    cx_ptr->type = ACPI_STATE_C2;
	    cx_ptr->trans_lat = AcpiGbl_FADT.C2Latency;
	    cx_ptr++;
	    sc->cpu_cx_count++;
	}
    }
    if (sc->cpu_p_blk_len < 6)
	return;

    /* Validate and allocate resources for C3 (P_LVL3). */
    if (AcpiGbl_FADT.C3Latency <= 1000 && !(cpu_quirks & CPU_QUIRK_NO_C3)) {
	gas.Address = sc->cpu_p_blk + 5;

	cx_ptr->rid = sc->cpu_parent->cpux_next_rid;
	acpi_bus_alloc_gas(sc->cpu_dev, &cx_ptr->type, &cx_ptr->rid, &gas,
			   &cx_ptr->p_lvlx, RF_SHAREABLE);
	if (cx_ptr->p_lvlx != NULL) {
	    sc->cpu_parent->cpux_next_rid++;
	    cx_ptr->type = ACPI_STATE_C3;
	    cx_ptr->trans_lat = AcpiGbl_FADT.C3Latency;
	    cx_ptr++;
	    sc->cpu_cx_count++;
	}
    }
}

/*
 * Parse a _CST package and set up its Cx states.  Since the _CST object
 * can change dynamically, our notify handler may call this function
 * to clean up and probe the new _CST package.
 */
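
/*
 * For reference, an illustrative _CST object as a firmware might return
 * it (ASL; the addresses, latencies and power values are made up):
 *
 *	Name (_CST, Package () {
 *	    2,					// number of Cx packages
 *	    Package () {			// C1
 *		ResourceTemplate () { Register (FFixedHW, 0, 0, 0) },
 *		1,				// type: C1
 *		1,				// transition latency (usec)
 *		1000				// power (mW)
 *	    },
 *	    Package () {			// C2
 *		ResourceTemplate () { Register (SystemIO, 8, 0, 0x415) },
 *		2, 50, 500
 *	    }
 *	})
 *
 * Element 0 of each Cx package is the trigger register handed to
 * acpi_PkgGas(); elements 1-3 are the type, latency and power parsed
 * below.
 */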
static int
acpi_cpu_cx_cst(struct acpi_cpu_softc *sc)
{
    struct acpi_cx	*cx_ptr;
    ACPI_STATUS		 status;
    ACPI_BUFFER		 buf;
    ACPI_OBJECT		*top;
    ACPI_OBJECT		*pkg;
    uint32_t		 count;
    int			 i;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(sc->cpu_handle, "_CST", NULL, &buf);
    if (ACPI_FAILURE(status))
	return (ENXIO);

    /* _CST is a package with a count and at least one Cx package. */
    top = (ACPI_OBJECT *)buf.Pointer;
    if (!ACPI_PKG_VALID(top, 2) || acpi_PkgInt32(top, 0, &count) != 0) {
	device_printf(sc->cpu_dev, "invalid _CST package\n");
	AcpiOsFree(buf.Pointer);
	return (ENXIO);
    }
    if (count != top->Package.Count - 1) {
	device_printf(sc->cpu_dev, "invalid _CST state count (%d != %d)\n",
		      count, top->Package.Count - 1);
	count = top->Package.Count - 1;
    }
    if (count > MAX_CX_STATES) {
	device_printf(sc->cpu_dev, "_CST has too many states (%d)\n", count);
	count = MAX_CX_STATES;
    }

    sc->cst_flags |= ACPI_CST_FLAG_PROBING;

    /* Set up all valid states. */
    sc->cpu_cx_count = 0;
    cx_ptr = sc->cpu_cx_states;
    for (i = 0; i < count; i++) {
	pkg = &top->Package.Elements[i + 1];
	if (!ACPI_PKG_VALID(pkg, 4) ||
	    acpi_PkgInt32(pkg, 1, &cx_ptr->type) != 0 ||
	    acpi_PkgInt32(pkg, 2, &cx_ptr->trans_lat) != 0 ||
	    acpi_PkgInt32(pkg, 3, &cx_ptr->power) != 0) {
	    device_printf(sc->cpu_dev, "skipping invalid Cx state package\n");
	    continue;
	}

	/* Validate the state to see if we should use it. */
	switch (cx_ptr->type) {
	case ACPI_STATE_C1:
	    sc->cpu_non_c3 = i;
	    cx_ptr++;
	    sc->cpu_cx_count++;
	    continue;
	case ACPI_STATE_C2:
	    sc->cpu_non_c3 = i;
	    break;
	case ACPI_STATE_C3:
	default:
	    if ((cpu_quirks & CPU_QUIRK_NO_C3) != 0) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				 "acpi_cpu%d: C3[%d] not available.\n",
				 device_get_unit(sc->cpu_dev), i));
		continue;
	    }
	    break;
	}

	/* Free up any previous register. */
	if (cx_ptr->p_lvlx != NULL) {
	    bus_release_resource(sc->cpu_dev, 0, 0, cx_ptr->p_lvlx);
	    cx_ptr->p_lvlx = NULL;
	}

	/* Allocate the control register for C2 or C3. */
	cx_ptr->rid = sc->cpu_parent->cpux_next_rid;
	acpi_PkgGas(sc->cpu_dev, pkg, 0, &cx_ptr->res_type, &cx_ptr->rid,
		    &cx_ptr->p_lvlx, RF_SHAREABLE);
	if (cx_ptr->p_lvlx) {
	    sc->cpu_parent->cpux_next_rid++;
	    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			     "acpi_cpu%d: Got C%d - %d latency\n",
			     device_get_unit(sc->cpu_dev), cx_ptr->type,
			     cx_ptr->trans_lat));
	    cx_ptr++;
	    sc->cpu_cx_count++;
	}
    }
    AcpiOsFree(buf.Pointer);

    /*
     * Fix up the lowest Cx being used
     */
    if (sc->cpu_cx_lowest_req < sc->cpu_cx_count)
	sc->cpu_cx_lowest = sc->cpu_cx_lowest_req;
    if (sc->cpu_cx_lowest > sc->cpu_cx_count - 1)
	sc->cpu_cx_lowest = sc->cpu_cx_count - 1;

    /*
     * Cache the lowest non-C3 state.
     * NOTE: this must be done after cpu_cx_lowest is set.
     */
    acpi_cpu_cx_non_c3(sc);

    sc->cst_flags &= ~ACPI_CST_FLAG_PROBING;

    return (0);
}

static void
acpi_cst_probe_handler(netmsg_t msg)
{
    struct netmsg_acpi_cst *rmsg = (struct netmsg_acpi_cst *)msg;
    int error;

    error = acpi_cpu_cx_cst(rmsg->sc);
    lwkt_replymsg(&rmsg->base.lmsg, error);
}
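
/*
 * The _CST re-probe must run on the CPU that owns the state, so it is
 * wrapped in a netmsg and sent to that CPU's netisr message port;
 * lwkt_domsg() blocks until acpi_cst_probe_handler() above replies
 * with the probe's error code.
 */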

static int
acpi_cpu_cx_cst_dispatch(struct acpi_cpu_softc *sc)
{
    struct netmsg_acpi_cst msg;

    netmsg_init(&msg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY,
		acpi_cst_probe_handler);
    msg.sc = sc;

    return lwkt_domsg(netisr_cpuport(sc->cpu_id), &msg.base.lmsg, 0);
}

/*
 * Call this *after* all CPUs have been attached.
 */
static void
acpi_cpu_startup(void *arg)
{
    struct acpi_cpu_softc *sc;
    int i;

    /* Get set of CPU devices */
    devclass_get_devices(acpi_cpu_cst_devclass, &cpu_devices, &cpu_ndevices);

    /*
     * Set up any quirks that might be necessary now that we have probed
     * all the CPUs.
     */
    acpi_cpu_quirks();

    if (cpu_cx_generic) {
	/*
	 * We are using generic Cx mode, probe for available Cx states
	 * for all processors.
	 */
	for (i = 0; i < cpu_ndevices; i++) {
	    sc = device_get_softc(cpu_devices[i]);
	    acpi_cpu_generic_cx_probe(sc);
	}
    } else {
	/*
	 * We are using _CST mode, remove C3 state if necessary.
	 *
	 * As we now know for sure that we will be using _CST mode,
	 * install our notify handler.
	 */
	for (i = 0; i < cpu_ndevices; i++) {
	    sc = device_get_softc(cpu_devices[i]);
	    if (cpu_quirks & CPU_QUIRK_NO_C3)
		sc->cpu_cx_count = sc->cpu_non_c3 + 1;
	    sc->cpu_parent->cpux_cst_notify = acpi_cpu_cst_notify;
	}
    }
    acpi_cpu_global_cx_count();

    /* Perform Cx final initialization. */
    for (i = 0; i < cpu_ndevices; i++) {
	sc = device_get_softc(cpu_devices[i]);
	acpi_cpu_startup_cx(sc);

	if (sc->cpu_parent->glob_sysctl_tree != NULL) {
	    struct acpi_cpux_softc *cpux = sc->cpu_parent;

	    /* Add a sysctl handler to handle global Cx lowest setting */
	    SYSCTL_ADD_PROC(&cpux->glob_sysctl_ctx,
			    SYSCTL_CHILDREN(cpux->glob_sysctl_tree),
			    OID_AUTO, "cx_lowest",
			    CTLTYPE_STRING | CTLFLAG_RW, NULL, 0,
			    acpi_cpu_global_cx_lowest_sysctl, "A",
			    "Requested global lowest Cx sleep state");
	    SYSCTL_ADD_PROC(&cpux->glob_sysctl_ctx,
			    SYSCTL_CHILDREN(cpux->glob_sysctl_tree),
			    OID_AUTO, "cx_lowest_use",
			    CTLTYPE_STRING | CTLFLAG_RD, NULL, 0,
			    acpi_cpu_global_cx_lowest_use_sysctl, "A",
			    "Global lowest Cx sleep state to use");
	}
    }

    /* Take over idling from cpu_idle_default(). */
    cpu_cx_lowest = 0;
    cpu_cx_lowest_req = 0;
    cpu_disable_idle = FALSE;
    cpu_idle_hook = acpi_cpu_idle;
}

static void
acpi_cpu_cx_list(struct acpi_cpu_softc *sc)
{
    struct sbuf sb;
    int i;

    /*
     * Set up the list of Cx states
     */
    sbuf_new(&sb, sc->cpu_cx_supported, sizeof(sc->cpu_cx_supported),
	     SBUF_FIXEDLEN);
    for (i = 0; i < sc->cpu_cx_count; i++)
	sbuf_printf(&sb, "C%d/%d ", i + 1, sc->cpu_cx_states[i].trans_lat);
    sbuf_trim(&sb);
    sbuf_finish(&sb);
}

static void
acpi_cpu_startup_cx(struct acpi_cpu_softc *sc)
{
    struct acpi_cpux_softc *cpux = sc->cpu_parent;

    acpi_cpu_cx_list(sc);

    SYSCTL_ADD_STRING(&cpux->pcpu_sysctl_ctx,
		      SYSCTL_CHILDREN(cpux->pcpu_sysctl_tree),
		      OID_AUTO, "cx_supported", CTLFLAG_RD,
		      sc->cpu_cx_supported, 0,
		      "Cx/microsecond values for supported Cx states");
    SYSCTL_ADD_PROC(&cpux->pcpu_sysctl_ctx,
		    SYSCTL_CHILDREN(cpux->pcpu_sysctl_tree),
		    OID_AUTO, "cx_lowest", CTLTYPE_STRING | CTLFLAG_RW,
		    (void *)sc, 0, acpi_cpu_cx_lowest_sysctl, "A",
		    "requested lowest Cx sleep state");
    SYSCTL_ADD_PROC(&cpux->pcpu_sysctl_ctx,
		    SYSCTL_CHILDREN(cpux->pcpu_sysctl_tree),
		    OID_AUTO, "cx_lowest_use", CTLTYPE_STRING | CTLFLAG_RD,
		    (void *)sc, 0, acpi_cpu_cx_lowest_use_sysctl, "A",
		    "lowest Cx sleep state to use");
    SYSCTL_ADD_PROC(&cpux->pcpu_sysctl_ctx,
		    SYSCTL_CHILDREN(cpux->pcpu_sysctl_tree),
		    OID_AUTO, "cx_usage", CTLTYPE_STRING | CTLFLAG_RD,
		    (void *)sc, 0, acpi_cpu_usage_sysctl, "A",
		    "percent usage for each Cx state");
    /* Signal platform that we can handle _CST notification. */
    if (!cpu_cx_generic && cpu_cst_cnt != 0) {
	ACPI_LOCK(acpi);
	AcpiOsWritePort(cpu_smi_cmd, cpu_cst_cnt, 8);
	ACPI_UNLOCK(acpi);
    }
}

/*
 * Idle the CPU in the lowest state possible.  This function is called with
 * interrupts disabled.  Note that once it re-enables interrupts, a task
 * switch can occur so do not access shared data (i.e. the softc) after
 * interrupts are re-enabled.
 */
static void
acpi_cpu_idle(void)
{
    struct acpi_cpu_softc *sc;
    struct acpi_cx *cx_next;
    uint64_t start_time, end_time;
    int bm_active, cx_next_idx, i;

    /* If disabled, return immediately. */
    if (cpu_disable_idle) {
	ACPI_ENABLE_IRQS();
	return;
    }

    /*
     * Look up our CPU id to get our softc.  If it's NULL, we'll use C1
     * since there is no ACPI processor object for this CPU.  This occurs
     * for logical CPUs in the HTT case.
     */
    sc = cpu_softc[mdcpu->mi.gd_cpuid];
    if (sc == NULL) {
	acpi_cpu_c1();
	return;
    }

    /* Still probing; use C1 */
    if (sc->cst_flags & ACPI_CST_FLAG_PROBING) {
	acpi_cpu_c1();
	return;
    }

    /* Find the lowest state that has small enough latency. */
    cx_next_idx = 0;
    for (i = sc->cpu_cx_lowest; i >= 0; i--) {
	if (sc->cpu_cx_states[i].trans_lat * 3 <= sc->cpu_prev_sleep) {
	    cx_next_idx = i;
	    break;
	}
    }

    /*
     * Check for bus master activity.  If there was activity, clear
     * the bit and use the lowest non-C3 state.  Note that the USB
     * driver polling for new devices keeps this bit set all the
     * time if USB is loaded.
     */
    if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) {
	AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, &bm_active);
	if (bm_active != 0) {
	    AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	    cx_next_idx = min(cx_next_idx, sc->cpu_non_c3);
	}
    }

    /* Select the next state and update statistics. */
    cx_next = &sc->cpu_cx_states[cx_next_idx];
    sc->cpu_cx_stats[cx_next_idx]++;
    KASSERT(cx_next->type != ACPI_STATE_C0, ("acpi_cpu_idle: C0 sleep"));

    /*
     * Execute HLT (or equivalent) and wait for an interrupt.  We can't
     * calculate the time spent in C1 since the place we wake up is an
     * ISR.  Assume we slept half of quantum and return.
     */
    if (cx_next->type == ACPI_STATE_C1) {
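	/*
	 * 500000 / hz is half of the scheduling quantum in microseconds
	 * (e.g. 5000 usec at hz == 100); the estimate below weights the
	 * previous value 3:1 against it.
	 */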
	sc->cpu_prev_sleep = (sc->cpu_prev_sleep * 3 + 500000 / hz) / 4;
	acpi_cpu_c1();
	return;
    }

    /*
     * For C3(+), disable bus master arbitration and enable bus master wake
     * if BM control is available, otherwise flush the CPU cache.
     */
    if (cx_next->type >= ACPI_STATE_C3) {
	if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) {
	    AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 1);
	    AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1);
	} else
	    ACPI_FLUSH_CPU_CACHE();
    }

    /*
     * Read from P_LVLx to enter C2(+), checking time spent asleep.
     * Use the ACPI timer for measuring sleep time.  Since we need to
     * get the time very close to the CPU start/stop clock logic, this
     * is the only reliable time source.
     */
    AcpiRead(&start_time, &AcpiGbl_FADT.XPmTimerBlock);
    CPU_GET_REG(cx_next->p_lvlx, 1);

    /*
     * Read the end time twice.  Since it may take an arbitrary time
     * to enter the idle state, the first read may be executed before
     * the processor has stopped.  Doing it again provides enough
     * margin that we are certain to have a correct value.
     */
    AcpiRead(&end_time, &AcpiGbl_FADT.XPmTimerBlock);
    AcpiRead(&end_time, &AcpiGbl_FADT.XPmTimerBlock);

    /* Enable bus master arbitration and disable bus master wakeup. */
    if (cx_next->type >= ACPI_STATE_C3) {
	if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) {
	    AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 0);
	    AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0);
	}
    }
    ACPI_ENABLE_IRQS();

    /* Find the actual time asleep in microseconds. */
    end_time = acpi_TimerDelta(end_time, start_time);
    sc->cpu_prev_sleep = (sc->cpu_prev_sleep * 3 + PM_USEC(end_time)) / 4;
}

/*
 * Re-evaluate the _CST object when we are notified that it changed.
 */
static void
acpi_cpu_cst_notify(device_t dev)
{
    struct acpi_cpu_softc *sc = device_get_softc(dev);

    KASSERT(curthread->td_type != TD_TYPE_NETISR,
	    ("notify in netisr%d", mycpuid));

    lwkt_serialize_enter(&cpu_cx_slize);

    /* Update the list of Cx states. */
    acpi_cpu_cx_cst_dispatch(sc);
    acpi_cpu_cx_list(sc);

    /* Update the new lowest usable Cx state for all CPUs. */
    acpi_cpu_global_cx_count();

    /*
     * Fix up the lowest Cx being used
     */
    if (cpu_cx_lowest_req < cpu_cx_count)
	cpu_cx_lowest = cpu_cx_lowest_req;
    if (cpu_cx_lowest > cpu_cx_count - 1)
	cpu_cx_lowest = cpu_cx_count - 1;

    lwkt_serialize_exit(&cpu_cx_slize);
}

static int
acpi_cpu_quirks(void)
{
    device_t acpi_dev;
    uint32_t val;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /*
     * Bus mastering arbitration control is needed to keep caches coherent
     * while sleeping in C3.  If it's not present but a working flush cache
     * instruction is present, flush the caches before entering C3 instead.
     * Otherwise, just disable C3 completely.
     */
    if (AcpiGbl_FADT.Pm2ControlBlock == 0 ||
	AcpiGbl_FADT.Pm2ControlLength == 0) {
	if ((AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD) &&
	    (AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD_FLUSH) == 0) {
	    cpu_quirks |= CPU_QUIRK_NO_BM_CTRL;
	    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		"acpi_cpu: no BM control, using flush cache method\n"));
	} else {
	    cpu_quirks |= CPU_QUIRK_NO_C3;
	    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		"acpi_cpu: no BM control, C3 not available\n"));
	}
    }

    /*
     * If we are using generic Cx mode, C3 on multiple CPUs requires using
     * the expensive flush cache instruction.
     */
    if (cpu_cx_generic && ncpus > 1) {
	cpu_quirks |= CPU_QUIRK_NO_BM_CTRL;
	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
	    "acpi_cpu: SMP, using flush cache mode for C3\n"));
    }

    /* Look for various quirks of the PIIX4 part. */
    acpi_dev = pci_find_device(PCI_VENDOR_INTEL, PCI_DEVICE_82371AB_3);
    if (acpi_dev != NULL) {
	switch (pci_get_revid(acpi_dev)) {
	/*
	 * Disable C3 support for all PIIX4 chipsets.  Some of these parts
	 * do not report the BMIDE status to the BM status register and
	 * others have a livelock bug if Type-F DMA is enabled.  Linux
	 * works around the BMIDE bug by reading the BM status directly
	 * but we take the simpler approach of disabling C3 for these
	 * parts.
	 *
	 * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
	 * Livelock") from the January 2002 PIIX4 specification update.
	 * Applies to all PIIX4 models.
	 *
	 * Also, make sure that all interrupts cause a "Stop Break"
	 * event to exit from C2 state.
	 * Also, BRLD_EN_BM (ACPI_BITREG_BUS_MASTER_RLD in ACPI-speak)
	 * should be set to zero, otherwise it causes C2 to short-sleep.
	 * PIIX4 doesn't properly support C3 and bus master activity
	 * need not break out of C2.
	 */
	case PCI_REVISION_A_STEP:
	case PCI_REVISION_B_STEP:
	case PCI_REVISION_4E:
	case PCI_REVISION_4M:
	    cpu_quirks |= CPU_QUIRK_NO_C3;
	    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		"acpi_cpu: working around PIIX4 bug, disabling C3\n"));

	    val = pci_read_config(acpi_dev, PIIX4_DEVACTB_REG, 4);
	    if ((val & PIIX4_STOP_BREAK_MASK) != PIIX4_STOP_BREAK_MASK) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		    "acpi_cpu: PIIX4: enabling IRQs to generate Stop Break\n"));
		val |= PIIX4_STOP_BREAK_MASK;
		pci_write_config(acpi_dev, PIIX4_DEVACTB_REG, val, 4);
	    }
	    AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_RLD, &val);
	    if (val) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		    "acpi_cpu: PIIX4: reset BRLD_EN_BM\n"));
		AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0);
	    }
	    break;
	default:
	    break;
	}
    }

    return (0);
}

static int
acpi_cpu_usage_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cpu_softc *sc;
    struct sbuf	 sb;
    char	 buf[128];
    int		 i;
    uintmax_t	 fract, sum, whole;

    sc = (struct acpi_cpu_softc *) arg1;
    sum = 0;
    for (i = 0; i < sc->cpu_cx_count; i++)
	sum += sc->cpu_cx_stats[i];
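    /*
     * Example of the fixed-point percentage math below: with sum == 400
     * and cpu_cx_stats[i] == 123, whole == 12300, 12300 / 400 == 30 and
     * (12300 % 400) * 100 / 400 == 75, so "30.75%" is printed.
     */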
    sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
    for (i = 0; i < sc->cpu_cx_count; i++) {
	if (sum > 0) {
	    whole = (uintmax_t)sc->cpu_cx_stats[i] * 100;
	    fract = (whole % sum) * 100;
	    sbuf_printf(&sb, "%u.%02u%% ", (u_int)(whole / sum),
			(u_int)(fract / sum));
	} else
	    sbuf_printf(&sb, "0.00%% ");
    }
    sbuf_printf(&sb, "last %dus", sc->cpu_prev_sleep);
    sbuf_trim(&sb);
    sbuf_finish(&sb);
    sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
    sbuf_delete(&sb);

    return (0);
}

static int
acpi_cpu_set_cx_lowest_oncpu(struct acpi_cpu_softc *sc, int val)
{
    int old_lowest, error = 0;
    uint32_t old_type, type;
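
    /*
     * Entering C3(+) typically stops the per-CPU tick source (e.g. the
     * local APIC timer), so the first CPU to allow C3 switches the
     * interrupt cputimer to a source that keeps running in deep sleep
     * (CPUTIMER_INTR_CAP_PS) and the last CPU to leave C3 switches it
     * back; cpu_c3_ncpus counts the CPUs currently allowing C3.
     */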

    KKASSERT(mycpuid == sc->cpu_id);

    sc->cpu_cx_lowest_req = val;
    if (val > sc->cpu_cx_count - 1)
	val = sc->cpu_cx_count - 1;
    old_lowest = atomic_swap_int(&sc->cpu_cx_lowest, val);

    old_type = sc->cpu_cx_states[old_lowest].type;
    type = sc->cpu_cx_states[val].type;
    if (old_type >= ACPI_STATE_C3 && type < ACPI_STATE_C3) {
	KKASSERT(cpu_c3_ncpus > 0);
	if (atomic_fetchadd_int(&cpu_c3_ncpus, -1) == 1) {
	    /*
	     * All of the CPUs have exited C3; switch back to the
	     * preferred interrupt cputimer.
	     */
	    error = cputimer_intr_select_caps(CPUTIMER_INTR_CAP_NONE);
	    KKASSERT(!error || error == ERESTART);
	    if (error == ERESTART) {
		if (bootverbose)
		    kprintf("exit C3, restart intr cputimer\n");
		cputimer_intr_restart();
		error = 0;
	    }
	}
    } else if (type >= ACPI_STATE_C3 && old_type < ACPI_STATE_C3) {
	if (atomic_fetchadd_int(&cpu_c3_ncpus, 1) == 0) {
	    /*
	     * The first CPU is entering C3(+); switch to an interrupt
	     * cputimer that keeps running in C3(+), i.e. one that will
	     * not hang.
	     */
	    error = cputimer_intr_select_caps(CPUTIMER_INTR_CAP_PS);
	    if (error == ERESTART) {
		if (bootverbose)
		    kprintf("enter C3, restart intr cputimer\n");
		cputimer_intr_restart();
		error = 0;
	    } else if (error) {
		kprintf("no suitable intr cputimer found\n");

		/* Restore */
		sc->cpu_cx_lowest = old_lowest;
		atomic_fetchadd_int(&cpu_c3_ncpus, -1);
	    }
	}
    }

    if (error)
	return error;

    /* Cache the new lowest non-C3 state. */
    acpi_cpu_cx_non_c3(sc);

    /* Reset the statistics counters. */
    bzero(sc->cpu_cx_stats, sizeof(sc->cpu_cx_stats));
    return (0);
}

static void
acpi_cst_set_lowest_handler(netmsg_t msg)
{
    struct netmsg_acpi_cst *rmsg = (struct netmsg_acpi_cst *)msg;
    int error;

    error = acpi_cpu_set_cx_lowest_oncpu(rmsg->sc, rmsg->val);
    lwkt_replymsg(&rmsg->base.lmsg, error);
}

static int
acpi_cpu_set_cx_lowest(struct acpi_cpu_softc *sc, int val)
{
    struct netmsg_acpi_cst msg;

    netmsg_init(&msg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY,
		acpi_cst_set_lowest_handler);
    msg.sc = sc;
    msg.val = val;

    return lwkt_domsg(netisr_cpuport(sc->cpu_id), &msg.base.lmsg, 0);
}

static int
acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cpu_softc *sc;
    char	 state[8];
    int		 val, error;

    sc = (struct acpi_cpu_softc *)arg1;
    ksnprintf(state, sizeof(state), "C%d", sc->cpu_cx_lowest_req + 1);
    error = sysctl_handle_string(oidp, state, sizeof(state), req);
    if (error != 0 || req->newptr == NULL)
	return (error);
    if (strlen(state) < 2 || toupper(state[0]) != 'C')
	return (EINVAL);
    val = (int) strtol(state + 1, NULL, 10) - 1;
    if (val < 0)
	return (EINVAL);

    lwkt_serialize_enter(&cpu_cx_slize);
    error = acpi_cpu_set_cx_lowest(sc, val);
    lwkt_serialize_exit(&cpu_cx_slize);

    return error;
}

static int
acpi_cpu_cx_lowest_use_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cpu_softc *sc;
    char state[8];

    sc = (struct acpi_cpu_softc *)arg1;
    ksnprintf(state, sizeof(state), "C%d", sc->cpu_cx_lowest + 1);
    return sysctl_handle_string(oidp, state, sizeof(state), req);
}

static int
acpi_cpu_global_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cpu_softc *sc;
    char	 state[8];
    int		 val, error, i;

    ksnprintf(state, sizeof(state), "C%d", cpu_cx_lowest_req + 1);
    error = sysctl_handle_string(oidp, state, sizeof(state), req);
    if (error != 0 || req->newptr == NULL)
	return (error);
    if (strlen(state) < 2 || toupper(state[0]) != 'C')
	return (EINVAL);
    val = (int) strtol(state + 1, NULL, 10) - 1;
    if (val < 0)
	return (EINVAL);

    lwkt_serialize_enter(&cpu_cx_slize);

    cpu_cx_lowest_req = val;
    cpu_cx_lowest = val;
    if (cpu_cx_lowest > cpu_cx_count - 1)
	cpu_cx_lowest = cpu_cx_count - 1;

    /* Update the new lowest usable Cx state for all CPUs. */
    for (i = 0; i < cpu_ndevices; i++) {
	sc = device_get_softc(cpu_devices[i]);
	error = acpi_cpu_set_cx_lowest(sc, val);
	if (error)
	    break;
    }

    lwkt_serialize_exit(&cpu_cx_slize);

    return error;
}

static int
acpi_cpu_global_cx_lowest_use_sysctl(SYSCTL_HANDLER_ARGS)
{
    char state[8];

    ksnprintf(state, sizeof(state), "C%d", cpu_cx_lowest + 1);
    return sysctl_handle_string(oidp, state, sizeof(state), req);
}
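
/*
 * NOTE: on x86 "sti" keeps interrupts inhibited until after the next
 * instruction completes, so the "sti; hlt" pair below cannot lose a
 * wakeup: an interrupt arriving after the gd_reqflags check still
 * brings the CPU out of hlt.
 */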
/*
 * Put the CPU in C1 in a machine-dependent way.
 * XXX: shouldn't be here!
 */
static void
acpi_cpu_c1(void)
{
#ifdef __ia64__
    ia64_call_pal_static(PAL_HALT_LIGHT, 0, 0, 0);
#else
    if ((mycpu->gd_reqflags & RQF_IDLECHECK_WK_MASK) == 0)
	__asm __volatile("sti; hlt");
    else
	__asm __volatile("sti; pause");
#endif /* !__ia64__ */
}

static void
acpi_cpu_cx_non_c3(struct acpi_cpu_softc *sc)
{
    int i;

    sc->cpu_non_c3 = 0;
    for (i = sc->cpu_cx_lowest; i >= 0; i--) {
	if (sc->cpu_cx_states[i].type < ACPI_STATE_C3) {
	    sc->cpu_non_c3 = i;
	    break;
	}
    }
    if (bootverbose)
	device_printf(sc->cpu_dev, "non-C3 %d\n", sc->cpu_non_c3);
}

/*
 * Update the largest Cx state supported in the global cpu_cx_count.
 * It will be used in the global Cx sysctl handler.
 */
static void
acpi_cpu_global_cx_count(void)
{
    struct acpi_cpu_softc *sc;
    int i;

    if (cpu_ndevices == 0) {
	cpu_cx_count = 0;
	return;
    }

    sc = device_get_softc(cpu_devices[0]);
    cpu_cx_count = sc->cpu_cx_count;

    for (i = 1; i < cpu_ndevices; i++) {
	struct acpi_cpu_softc *sc = device_get_softc(cpu_devices[i]);

	if (sc->cpu_cx_count < cpu_cx_count)
	    cpu_cx_count = sc->cpu_cx_count;
    }

    if (bootverbose)
	kprintf("cpu_cst: global Cx count %d\n", cpu_cx_count);
}