Device layer rollup commit.
[dragonfly.git] / sys / dev / acpica / acpi_cpu.c
1 /*-
2  * Copyright (c) 2001 Michael Smith
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  *      $FreeBSD: src/sys/dev/acpica/acpi_cpu.c,v 1.16.4.1 2003/08/22 20:49:20 jhb Exp $
27  *      $DragonFly: src/sys/dev/acpica/Attic/acpi_cpu.c,v 1.1 2003/09/24 03:32:16 drhodus Exp $ 
28  */
29
30 #include "opt_acpi.h"
31 #include <sys/param.h>
32 #include <sys/kernel.h>
33 #include <sys/bus.h>
34 #include <sys/power.h>
35
36 #include <machine/bus_pio.h>
37 #include <machine/bus.h>
38 #include <machine/resource.h>
39 #include <sys/rman.h>
40
41 #include "acpi.h"
42
43 #include <dev/acpica/acpivar.h>
44
45 /*
46  * Support for ACPI Processor devices.
47  *
48  * Note that this only provides ACPI 1.0 support (with the exception of the
49  * PSTATE_CNT field).  2.0 support will involve implementing _PTC, _PCT,
50  * _PSS and _PPC.
51  */
52
53 /*
54  * Hooks for the ACPI CA debugging infrastructure
55  */
56 #define _COMPONENT      ACPI_PROCESSOR
57 ACPI_MODULE_NAME("PROCESSOR")
58
/* Per-CPU driver state. */
struct acpi_cpu_softc {
    device_t            cpu_dev;        /* newbus device handle */
    ACPI_HANDLE         cpu_handle;     /* ACPI namespace handle for this Processor */

    u_int32_t           cpu_id;         /* ProcId from the Processor object */

    /* CPU throttling control register (P_CNT, at offset 0 of P_BLK) */
    struct resource     *cpu_p_blk;     /* NULL if this CPU's P_BLK is shadowed/absent */
/* Read/write the 32-bit P_CNT register through the allocated I/O resource. */
#define CPU_GET_P_CNT(sc)       (bus_space_read_4(rman_get_bustag((sc)->cpu_p_blk),     \
                                                  rman_get_bushandle((sc)->cpu_p_blk),  \
                                                  0))
#define CPU_SET_P_CNT(sc, val)  (bus_space_write_4(rman_get_bustag((sc)->cpu_p_blk),    \
                                                  rman_get_bushandle((sc)->cpu_p_blk),  \
                                                  0, (val)))
/* THT_EN bit (bit 4) of P_CNT: duty-cycle throttling is active when set. */
#define CPU_P_CNT_THT_EN        (1<<4)
};
75
/* 
 * Speeds are stored in counts, from 1 - CPU_MAX_SPEED, and
 * reported to the user in tenths of a percent.
 */
static u_int32_t        cpu_duty_offset;        /* bit offset of CLK_VAL in P_CNT (FADT DUTY_OFFSET) */
static u_int32_t        cpu_duty_width;         /* CLK_VAL width in bits; 0 means no throttling (FADT DUTY_WIDTH) */
#define CPU_MAX_SPEED           (1 << cpu_duty_width)
#define CPU_SPEED_PERCENT(x)    ((1000 * (x)) / CPU_MAX_SPEED)
#define CPU_SPEED_PRINTABLE(x)  (CPU_SPEED_PERCENT(x) / 10),(CPU_SPEED_PERCENT(x) % 10)

static u_int32_t        cpu_smi_cmd;    /* should be a generic way to do this */
static u_int8_t         cpu_pstate_cnt; /* value written to SMI_CMD to claim throttling (ACPI 2.0 PSTATE_CNT) */

/* Throttling states, as duty counts in 1 .. CPU_MAX_SPEED (shared by all CPUs). */
static u_int32_t        cpu_current_state;
static u_int32_t        cpu_performance_state;
static u_int32_t        cpu_economy_state;
static u_int32_t        cpu_max_state;

/* Snapshot of attached acpi_cpu devices, taken once in acpi_cpu_init_throttling(). */
static device_t         *cpu_devices;
static int              cpu_ndevices;

/* sysctl context and root of the hw.acpi.cpu tree. */
static struct sysctl_ctx_list   acpi_cpu_sysctl_ctx;
static struct sysctl_oid        *acpi_cpu_sysctl_tree;

static int      acpi_cpu_probe(device_t dev);
static int      acpi_cpu_attach(device_t dev);
static void     acpi_cpu_init_throttling(void *arg);
static void     acpi_cpu_set_speed(u_int32_t speed);
static void     acpi_cpu_power_profile(void *arg);
static int      acpi_cpu_speed_sysctl(SYSCTL_HANDLER_ARGS);
106
/* Newbus glue: probe/attach only; no detach, the driver stays loaded. */
static device_method_t acpi_cpu_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe,     acpi_cpu_probe),
    DEVMETHOD(device_attach,    acpi_cpu_attach),

    {0, 0}
};

static driver_t acpi_cpu_driver = {
    "acpi_cpu",
    acpi_cpu_methods,
    sizeof(struct acpi_cpu_softc),
};

static devclass_t acpi_cpu_devclass;
/* Attach under the acpi bus driver. */
DRIVER_MODULE(acpi_cpu, acpi, acpi_cpu_driver, acpi_cpu_devclass, 0, 0);
123
124 static int
125 acpi_cpu_probe(device_t dev)
126 {
127     if (!acpi_disabled("cpu") &&
128         (acpi_get_type(dev) == ACPI_TYPE_PROCESSOR)) {
129         device_set_desc(dev, "CPU");    /* XXX get more verbose description? */
130         return(0);
131     }
132     return(ENXIO);
133 }
134
/*
 * Attach an ACPI Processor device.
 *
 * Unit 0 additionally reads the system-wide throttling parameters from
 * the FADT, validates them, queues the one-shot throttling setup to run
 * after the probe phase, and creates the hw.acpi.cpu sysctl tree.
 * Every unit then evaluates its Processor object and, if throttling is
 * usable, maps its 6-byte P_BLK I/O range so P_CNT can be accessed.
 *
 * Returns 0 on success, ENXIO if the Processor object can't be
 * evaluated or has the wrong type.
 */
static int
acpi_cpu_attach(device_t dev)
{
    struct acpi_cpu_softc       *sc;
    struct acpi_softc           *acpi_sc;
    ACPI_OBJECT                 processor;
    ACPI_BUFFER                 buf;
    ACPI_STATUS                 status;
    u_int32_t                   p_blk;
    u_int32_t                   p_blk_length;
    u_int32_t                   duty_end;
    int                         rid;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    ACPI_ASSERTLOCK;

    sc = device_get_softc(dev);
    sc->cpu_dev = dev;
    sc->cpu_handle = acpi_get_handle(dev);

    /*
     * Get global parameters from the FADT.  These are system-wide, so
     * only the first unit to attach does this.
     */
    if (device_get_unit(sc->cpu_dev) == 0) {
        cpu_duty_offset = AcpiGbl_FADT->DutyOffset;
        cpu_duty_width = AcpiGbl_FADT->DutyWidth;
        cpu_smi_cmd = AcpiGbl_FADT->SmiCmd;
        cpu_pstate_cnt = AcpiGbl_FADT->PstateCnt;

        /* validate the offset/width */
        if (cpu_duty_width > 0) {
                duty_end = cpu_duty_offset + cpu_duty_width - 1;
                /* check that the field fits in the 32-bit P_CNT register */
                if (duty_end > 31) {
                        printf("acpi_cpu: CLK_VAL field overflows P_CNT register\n");
                        cpu_duty_width = 0;
                }
                /* check for overlap with the THT_EN bit (bit 4) */
                if ((cpu_duty_offset <= 4) && (duty_end >= 4)) {
                        printf("acpi_cpu: CLK_VAL field overlaps THT_EN bit\n");
                        cpu_duty_width = 0;
                }
        }

        /* 
         * Start the throttling process once the probe phase completes, if we think that
         * it's going to be useful.  If the duty width value is zero, there are no significant
         * bits in the register and thus no throttled states.
         */
        if (cpu_duty_width > 0) {
            /* deferred: needs all CPUs attached before it runs */
            AcpiOsQueueForExecution(OSD_PRIORITY_LO, acpi_cpu_init_throttling, NULL);

            /* export max/current speeds (read-only) and the two profile
             * speeds (read-write, validated by acpi_cpu_speed_sysctl) */
            acpi_sc = acpi_device_get_parent_softc(dev);
            sysctl_ctx_init(&acpi_cpu_sysctl_ctx);
            acpi_cpu_sysctl_tree = SYSCTL_ADD_NODE(&acpi_cpu_sysctl_ctx,
                                                  SYSCTL_CHILDREN(acpi_sc->acpi_sysctl_tree),
                                                  OID_AUTO, "cpu", CTLFLAG_RD, 0, "");

            SYSCTL_ADD_INT(&acpi_cpu_sysctl_ctx, SYSCTL_CHILDREN(acpi_cpu_sysctl_tree),
                           OID_AUTO, "max_speed", CTLFLAG_RD,
                           &cpu_max_state, 0, "maximum CPU speed");
            SYSCTL_ADD_INT(&acpi_cpu_sysctl_ctx, SYSCTL_CHILDREN(acpi_cpu_sysctl_tree),
                           OID_AUTO, "current_speed", CTLFLAG_RD,
                           &cpu_current_state, 0, "current CPU speed");
            SYSCTL_ADD_PROC(&acpi_cpu_sysctl_ctx, SYSCTL_CHILDREN(acpi_cpu_sysctl_tree),
                            OID_AUTO, "performance_speed", CTLTYPE_INT | CTLFLAG_RW,
                            &cpu_performance_state, 0, acpi_cpu_speed_sysctl, "I", "");
            SYSCTL_ADD_PROC(&acpi_cpu_sysctl_ctx, SYSCTL_CHILDREN(acpi_cpu_sysctl_tree),
                            OID_AUTO, "economy_speed", CTLTYPE_INT | CTLFLAG_RW,
                            &cpu_economy_state, 0, acpi_cpu_speed_sysctl, "I", "");
        }
    }

    /*
     * Get the processor object (evaluating the handle itself yields the
     * Processor package: ProcId, PblkAddress, PblkLength).
     */
    buf.Pointer = &processor;
    buf.Length = sizeof(processor);
    if (ACPI_FAILURE(status = AcpiEvaluateObject(sc->cpu_handle, NULL, NULL, &buf))) {
        device_printf(sc->cpu_dev, "couldn't get Processor object - %s\n", AcpiFormatException(status));
        return_VALUE(ENXIO);
    }
    if (processor.Type != ACPI_TYPE_PROCESSOR) {
        device_printf(sc->cpu_dev, "Processor object has bad type %d\n", processor.Type);
        return_VALUE(ENXIO);
    }
    sc->cpu_id = processor.Processor.ProcId;

    /*
     * If it looks like we support throttling, find this CPU's P_BLK.
     *
     * Note that some systems seem to duplicate the P_BLK pointer across  
     * multiple CPUs, so not getting the resource is not fatal.
     * 
     * XXX should support _PTC here as well, once we work out how to parse it.
     *
     * XXX is it valid to assume that the P_BLK must be 6 bytes long?
     */
    if (cpu_duty_width > 0) {
        p_blk = processor.Processor.PblkAddress;
        p_blk_length = processor.Processor.PblkLength;
    
        /* allocate bus space if possible; NULL cpu_p_blk just means this
         * CPU is skipped by acpi_cpu_set_speed() */
        if ((p_blk > 0) && (p_blk_length == 6)) {
            rid = 0;
            bus_set_resource(sc->cpu_dev, SYS_RES_IOPORT, rid, p_blk, p_blk_length);
            sc->cpu_p_blk = bus_alloc_resource(sc->cpu_dev, SYS_RES_IOPORT, &rid, 0, ~0, 1,
                                               RF_ACTIVE);

            ACPI_DEBUG_PRINT((ACPI_DB_IO, "acpi_cpu%d: throttling with P_BLK at 0x%x/%d%s\n", 
                              device_get_unit(sc->cpu_dev), p_blk, p_blk_length,
                              sc->cpu_p_blk ? "" : " (shadowed)"));
        }
    }
    return_VALUE(0);
}
252
/*
 * One-shot throttling initialisation, queued from acpi_cpu_attach() and
 * run *after* all CPUs have been attached.
 *
 * Snapshots the set of attached CPU devices, derives the default
 * performance/economy states (optionally overridden by tunables),
 * registers for power-profile change events, claims throttling control
 * from the platform (ACPI 2.0 PSTATE_CNT) and applies the initial speed.
 *
 * Takes the ACPI lock to avoid fighting anyone over the SMI command
 * port.  Could probably lock less code.
 */
static void
acpi_cpu_init_throttling(void *arg)
{
    int cpu_temp_speed;
    ACPI_LOCK_DECL;

    ACPI_LOCK;

    /* get set of CPU devices */
    devclass_get_devices(acpi_cpu_devclass, &cpu_devices, &cpu_ndevices);

    /* initialise throttling states: full speed, and half speed (min 1)
     * for the economy profile */
    cpu_max_state = CPU_MAX_SPEED;
    cpu_performance_state = cpu_max_state;
    cpu_economy_state = cpu_performance_state / 2;
    if (cpu_economy_state == 0)         /* 0 is 'reserved' */
        cpu_economy_state++;
    /* tunables may override the defaults; values outside 1..max are ignored */
    if (TUNABLE_INT_FETCH("hw.acpi.cpu.performance_speed",
        &cpu_temp_speed) && cpu_temp_speed > 0 &&
        cpu_temp_speed <= cpu_max_state)
        cpu_performance_state = cpu_temp_speed;
    if (TUNABLE_INT_FETCH("hw.acpi.cpu.economy_speed",
        &cpu_temp_speed) && cpu_temp_speed > 0 &&
        cpu_temp_speed <= cpu_max_state)
        cpu_economy_state = cpu_temp_speed;

    /* register performance profile change handler */
    EVENTHANDLER_REGISTER(power_profile_change, acpi_cpu_power_profile, NULL, 0);

    /* if ACPI 2.0+, signal platform that we are taking over throttling */
    if (cpu_pstate_cnt != 0) {
        /* XXX should be a generic interface for this */
        AcpiOsWritePort(cpu_smi_cmd, cpu_pstate_cnt, 8);
    }

    ACPI_UNLOCK;

    /* set initial speed (acpi_cpu_power_profile takes the lock itself) */
    acpi_cpu_power_profile(NULL);
    
    printf("acpi_cpu: throttling enabled, %d steps (100%% to %d.%d%%), " 
           "currently %d.%d%%\n", CPU_MAX_SPEED, CPU_SPEED_PRINTABLE(1),
           CPU_SPEED_PRINTABLE(cpu_current_state));
}
303
304 /*
305  * Set CPUs to the new state.
306  *
307  * Must be called with the ACPI lock held.
308  */
309 static void
310 acpi_cpu_set_speed(u_int32_t speed)
311 {
312     struct acpi_cpu_softc       *sc;
313     int                         i;
314     u_int32_t                   p_cnt, clk_val;
315
316     ACPI_ASSERTLOCK;
317
318     /* iterate over processors */
319     for (i = 0; i < cpu_ndevices; i++) {
320         sc = device_get_softc(cpu_devices[i]);
321         if (sc->cpu_p_blk == NULL)
322             continue;
323
324         /* get the current P_CNT value and disable throttling */
325         p_cnt = CPU_GET_P_CNT(sc);
326         p_cnt &= ~CPU_P_CNT_THT_EN;
327         CPU_SET_P_CNT(sc, p_cnt);
328
329         /* if we're at maximum speed, that's all */
330         if (speed < CPU_MAX_SPEED) {
331
332             /* mask the old CLK_VAL off and or-in the new value */
333             clk_val = CPU_MAX_SPEED << cpu_duty_offset;
334             p_cnt &= ~clk_val;
335             p_cnt |= (speed << cpu_duty_offset);
336         
337             /* write the new P_CNT value and then enable throttling */
338             CPU_SET_P_CNT(sc, p_cnt);
339             p_cnt |= CPU_P_CNT_THT_EN;
340             CPU_SET_P_CNT(sc, p_cnt);
341         }
342         ACPI_VPRINT(sc->cpu_dev, acpi_device_get_parent_softc(sc->cpu_dev),
343             "set speed to %d.%d%%\n", CPU_SPEED_PRINTABLE(speed));
344     }
345     cpu_current_state = speed;
346 }
347
348 /*
349  * Power profile change hook.
350  *
351  * Uses the ACPI lock to avoid reentrancy.
352  */
353 static void
354 acpi_cpu_power_profile(void *arg)
355 {
356     int         state;
357     u_int32_t   new;
358     ACPI_LOCK_DECL;
359
360     state = power_profile_get_state();
361     if (state != POWER_PROFILE_PERFORMANCE &&
362         state != POWER_PROFILE_ECONOMY) {
363         return;
364     }
365
366     ACPI_LOCK;
367     
368     switch (state) {
369     case POWER_PROFILE_PERFORMANCE:
370         new = cpu_performance_state;
371         break;
372     case POWER_PROFILE_ECONOMY:
373         new = cpu_economy_state;
374         break;
375     default:
376         new = cpu_current_state;
377         break;
378     }
379
380     if (cpu_current_state != new)
381         acpi_cpu_set_speed(new);
382
383     ACPI_UNLOCK;
384 }
385
386 /*
387  * Handle changes in the performance/ecomony CPU settings.
388  *
389  * Does not need the ACPI lock (although setting *argp should
390  * probably be atomic).
391  */
392 static int
393 acpi_cpu_speed_sysctl(SYSCTL_HANDLER_ARGS)
394 {
395     u_int32_t   *argp;
396     u_int32_t   arg;
397     int         error;
398
399     argp = (u_int32_t *)oidp->oid_arg1;
400     arg = *argp;
401     error = sysctl_handle_int(oidp, &arg, 0, req);
402
403     /* error or no new value */
404     if ((error != 0) || (req->newptr == NULL))
405         return(error);
406     
407     /* range check */
408     if ((arg < 1) || (arg > cpu_max_state))
409         return(EINVAL);
410
411     /* set new value and possibly switch */
412     *argp = arg;
413     acpi_cpu_power_profile(NULL);
414
415     return(0);
416 }