2 * Copyright (c) 2009 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Sepherosa Ziehau <sepherosa@gmail.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 #include <sys/param.h>
36 #include <sys/kernel.h>
37 #include <sys/systm.h>
38 #include <sys/globaldata.h>
40 #include <machine/md_var.h>
41 #include <machine/cpufunc.h>
42 #include <machine/cpufreq.h>
43 #include <machine/specialreg.h>
46 #include "acpi_cpu_pstate.h"
/*
 * Register/MSR and bit-field definitions used by the AMD and Intel
 * machine-dependent P-state backends below.
 */
/* CPUID 0x80000007 EDX bit — presumably HwPstate support; TODO confirm vs BKDG */
48 #define AMD_APMI_HWPSTATE 0x80
/* Low 3 bits of an ACPI _PSS control/status value select the MSR P-state */
50 #define AMD_MSR_PSTATE_CSR_MASK 0x7ULL
/* AMD family 1xh P-state control and status MSRs */
51 #define AMD1X_MSR_PSTATE_CTL 0xc0010062
52 #define AMD1X_MSR_PSTATE_ST 0xc0010063
/* "P-state enabled" bit (bit 63) in the per-P-state definition MSRs */
54 #define AMD_MSR_PSTATE_EN 0x8000000000000000ULL
/* Family 10h P-state definition MSRs: P0-P4 starting at 0xc0010064 */
56 #define AMD10_MSR_PSTATE_START 0xc0010064
57 #define AMD10_MSR_PSTATE_COUNT 5
/*
 * Family 0fh _PSS control value fields: frequency ID, voltage ID and
 * the FID/VID transition timing parameters.
 */
59 #define AMD0F_PST_CTL_FID(cval) (((cval) >> 0) & 0x3f)
60 #define AMD0F_PST_CTL_VID(cval) (((cval) >> 6) & 0x1f)
61 #define AMD0F_PST_CTL_VST(cval) (((cval) >> 11) & 0x7f)
62 #define AMD0F_PST_CTL_MVS(cval) (((cval) >> 18) & 0x3)
63 #define AMD0F_PST_CTL_PLLTIME(cval) (((cval) >> 20) & 0x7f)
64 #define AMD0F_PST_CTL_RVO(cval) (((cval) >> 28) & 0x3)
65 #define AMD0F_PST_CTL_IRT(cval) (((cval) >> 30) & 0x3)
/* Family 0fh _PSS status value fields (FID/VID as reported by hardware) */
67 #define AMD0F_PST_ST_FID(sval) (((sval) >> 0) & 0x3f)
68 #define AMD0F_PST_ST_VID(sval) (((sval) >> 6) & 0x3f)
/* Intel MISC_ENABLE MSR and its Enhanced SpeedStep (EST) enable bit */
70 #define INTEL_MSR_MISC_ENABLE 0x1a0
71 #define INTEL_MSR_MISC_EST_EN 0x10000ULL
/* Intel EST performance status/control MSRs; low 16 bits carry the state */
73 #define INTEL_MSR_PERF_STATUS 0x198
74 #define INTEL_MSR_PERF_CTL 0x199
75 #define INTEL_MSR_PERF_MASK 0xffffULL
/*
 * Forward declarations: one probe/check/init/set/get method set each for
 * the AMD family 1xh (MSR), AMD family 0fh (FID/VID) and Intel EST
 * backends, plus small helpers for ACPI Generic Address Structure (GAS)
 * and I/O-port resource access.
 */
77 static const struct acpi_pst_md *
78 acpi_pst_amd_probe(void);
79 static int acpi_pst_amd_check_csr(const struct acpi_pst_res *,
80 const struct acpi_pst_res *);
81 static int acpi_pst_amd1x_check_pstates(const struct acpi_pstate *, int,
83 static int acpi_pst_amd10_check_pstates(const struct acpi_pstate *, int);
84 static int acpi_pst_amd0f_check_pstates(const struct acpi_pstate *, int);
85 static int acpi_pst_amd_init(const struct acpi_pst_res *,
86 const struct acpi_pst_res *);
87 static int acpi_pst_amd1x_set_pstate(const struct acpi_pst_res *,
88 const struct acpi_pst_res *, const struct acpi_pstate *);
89 static int acpi_pst_amd0f_set_pstate(const struct acpi_pst_res *,
90 const struct acpi_pst_res *, const struct acpi_pstate *);
91 static const struct acpi_pstate *
92 acpi_pst_amd1x_get_pstate(const struct acpi_pst_res *,
93 const struct acpi_pstate *, int);
94 static const struct acpi_pstate *
95 acpi_pst_amd0f_get_pstate(const struct acpi_pst_res *,
96 const struct acpi_pstate *, int);
98 static const struct acpi_pst_md *
99 acpi_pst_intel_probe(void);
100 static int acpi_pst_intel_check_csr(const struct acpi_pst_res *,
101 const struct acpi_pst_res *);
102 static int acpi_pst_intel_check_pstates(const struct acpi_pstate *, int);
103 static int acpi_pst_intel_init(const struct acpi_pst_res *,
104 const struct acpi_pst_res *);
105 static int acpi_pst_intel_set_pstate(const struct acpi_pst_res *,
106 const struct acpi_pst_res *, const struct acpi_pstate *);
107 static const struct acpi_pstate *
108 acpi_pst_intel_get_pstate(const struct acpi_pst_res *,
109 const struct acpi_pstate *, int);
111 static int acpi_pst_md_gas_asz(const ACPI_GENERIC_ADDRESS *);
112 static int acpi_pst_md_gas_verify(const ACPI_GENERIC_ADDRESS *);
113 static uint32_t acpi_pst_md_res_read(const struct acpi_pst_res *);
114 static void acpi_pst_md_res_write(const struct acpi_pst_res *, uint32_t);
/* Method table for AMD family 10h: MSR-based hardware P-state control */
116 static const struct acpi_pst_md acpi_pst_amd10 = {
117 .pmd_check_csr = acpi_pst_amd_check_csr,
118 .pmd_check_pstates = acpi_pst_amd10_check_pstates,
119 .pmd_init = acpi_pst_amd_init,
120 .pmd_set_pstate = acpi_pst_amd1x_set_pstate,
121 .pmd_get_pstate = acpi_pst_amd1x_get_pstate
/* Method table for AMD family 0fh: FID/VID transition based control */
124 static const struct acpi_pst_md acpi_pst_amd0f = {
125 .pmd_check_csr = acpi_pst_amd_check_csr,
126 .pmd_check_pstates = acpi_pst_amd0f_check_pstates,
127 .pmd_init = acpi_pst_amd_init,
128 .pmd_set_pstate = acpi_pst_amd0f_set_pstate,
129 .pmd_get_pstate = acpi_pst_amd0f_get_pstate
/* Method table for Intel Enhanced SpeedStep (EST) */
132 static const struct acpi_pst_md acpi_pst_intel = {
133 .pmd_check_csr = acpi_pst_intel_check_csr,
134 .pmd_check_pstates = acpi_pst_intel_check_pstates,
135 .pmd_init = acpi_pst_intel_init,
136 .pmd_set_pstate = acpi_pst_intel_set_pstate,
137 .pmd_get_pstate = acpi_pst_intel_get_pstate
/*
 * When non-zero (default), P-state tables that fail the stricter sanity
 * checks below are rejected; when zero they only produce warnings.
 * NOTE(review): "strigent" in the tunable name looks like a typo for
 * "stringent", but it is the user-visible knob name — renaming it would
 * silently break existing loader.conf settings, so it is left as-is.
 */
140 static int acpi_pst_stringent_check = 1;
141 TUNABLE_INT("hw.acpi.cpu.pstate.strigent_check", &acpi_pst_stringent_check);
/*
 * Public entry point: pick the vendor-specific P-state backend based on
 * the CPU vendor string (NULL for unsupported vendors — the fall-through
 * return is outside this view).
 */
143 const struct acpi_pst_md *
144 acpi_pst_md_probe(void)
146 if (strcmp(cpu_vendor, "AuthenticAMD") == 0)
147 return acpi_pst_amd_probe();
148 else if (strcmp(cpu_vendor, "GenuineIntel") == 0)
149 return acpi_pst_intel_probe();
/*
 * AMD probe: require a base family 0fh+ CPU and CPUID leaf 0x80000007
 * (advanced power management info), then dispatch on the extended family
 * field to the family 0fh (FID/VID) or family 10h (HwPstate MSR) backend.
 */
153 static const struct acpi_pst_md *
154 acpi_pst_amd_probe(void)
156 uint32_t regs[4], ext_family;
/* Base family field must be 0xf (families 0fh and above encode it so) */
158 if ((cpu_id & 0x00000f00) != 0x00000f00)
161 /* Check whether APMI exists */
162 do_cpuid(0x80000000, regs);
163 if (regs[0] < 0x80000007)
167 do_cpuid(0x80000007, regs);
169 ext_family = cpu_id & 0x0ff00000;
170 switch (ext_family) {
171 case 0x00000000: /* Family 0fh */
/* EDX bits 1|2: FID and VID control — both required; TODO confirm vs BKDG */
172 if ((regs[3] & 0x06) == 0x06)
173 return &acpi_pst_amd0f;
176 case 0x00100000: /* Family 10h */
/* NOTE(review): the HwPstate capability test appears to be outside this view */
178 return &acpi_pst_amd10;
/*
 * AMD control/status register check: both _PCT registers must be in the
 * "fixed hardware" (MSR) address space; anything else is rejected.
 */
188 acpi_pst_amd_check_csr(const struct acpi_pst_res *ctrl,
189 const struct acpi_pst_res *status)
191 if (ctrl->pr_gas.SpaceId != ACPI_ADR_SPACE_FIXED_HARDWARE) {
192 kprintf("cpu%d: Invalid P-State control register\n", mycpuid);
195 if (status->pr_gas.SpaceId != ACPI_ADR_SPACE_FIXED_HARDWARE) {
196 kprintf("cpu%d: Invalid P-State status register\n", mycpuid);
/*
 * Family 1xh: for each _PSS entry, map its control value (low 3 bits)
 * to a P-state definition MSR in [msr_start, msr_end) and verify that
 * the MSR exists and has its enable bit set.
 */
203 acpi_pst_amd1x_check_pstates(const struct acpi_pstate *pstates, int npstates,
204 uint32_t msr_start, uint32_t msr_end)
209 * Make sure that related MSR P-State registers are enabled.
212 * We don't check status register value here;
213 * it will not be used.
215 for (i = 0; i < npstates; ++i) {
/* msr = msr_start + (cval & mask) — first half of the expression is outside this view */
220 (pstates[i].st_cval & AMD_MSR_PSTATE_CSR_MASK);
221 if (msr >= msr_end) {
222 kprintf("cpu%d: MSR P-State register %#08x "
223 "does not exist\n", mycpuid, msr);
/* The rdmsr() that loads 'pstate' is outside this view */
228 if ((pstate & AMD_MSR_PSTATE_EN) == 0) {
229 kprintf("cpu%d: MSR P-State register %#08x "
230 "is not enabled\n", mycpuid, msr);
/*
 * Family 10h: at most AMD10_MSR_PSTATE_COUNT (P0-P4) states are defined
 * in hardware; delegate the per-state MSR checks to the 1xh helper.
 */
238 acpi_pst_amd10_check_pstates(const struct acpi_pstate *pstates, int npstates)
240 /* Only P0-P4 are supported */
241 if (npstates > AMD10_MSR_PSTATE_COUNT) {
242 kprintf("cpu%d: only P0-P4 is allowed\n", mycpuid);
246 return acpi_pst_amd1x_check_pstates(pstates, npstates,
247 AMD10_MSR_PSTATE_START,
248 AMD10_MSR_PSTATE_START + AMD10_MSR_PSTATE_COUNT);
/*
 * Family 1xh set: write the _PSS control value's P-state index (low 3
 * bits) into the P-state control MSR.  The transition completes in
 * hardware; no readback of the status MSR is attempted (see below).
 */
252 acpi_pst_amd1x_set_pstate(const struct acpi_pst_res *ctrl __unused,
253 const struct acpi_pst_res *status __unused,
254 const struct acpi_pstate *pstate)
258 cval = pstate->st_cval & AMD_MSR_PSTATE_CSR_MASK;
259 wrmsr(AMD1X_MSR_PSTATE_CTL, cval);
262 * Don't check AMD1X_MSR_PSTATE_ST here, since it is
263 * affected by various P-State limits.
266 * AMD Family 10h Processor BKDG Rev 3.20 (#31116)
267 * 2.4.2.4 P-state Transition Behavior
/*
 * Family 1xh get: read the current P-state index from the status MSR
 * and return the matching _PSS entry (by status value); the not-found
 * return is outside this view.
 */
273 static const struct acpi_pstate *
274 acpi_pst_amd1x_get_pstate(const struct acpi_pst_res *status __unused,
275 const struct acpi_pstate *pstates, int npstates)
280 sval = rdmsr(AMD1X_MSR_PSTATE_ST) & AMD_MSR_PSTATE_CSR_MASK;
281 for (i = 0; i < npstates; ++i) {
282 if ((pstates[i].st_sval & AMD_MSR_PSTATE_CSR_MASK) == sval)
/*
 * Family 0fh: sanity-check every _PSS entry's FID/VID against the
 * hardware min/max FID/VID limits.  The first entry must match the max
 * limit, the last entry the min limit, and intermediate entries must
 * fall inside the allowed FID/VID ranges.  Violations are warnings
 * unless acpi_pst_stringent_check is set, in which case they reject
 * the table.  Also validates the MVS/RVO voltage stepping fields.
 */
289 acpi_pst_amd0f_check_pstates(const struct acpi_pstate *pstates, int npstates)
291 struct amd0f_fidvid fv_max, fv_min;
294 amd0f_fidvid_limit(&fv_min, &fv_max);
/* Identical min and max limits mean the CPU offers a single P-state */
296 if (fv_min.fid == fv_max.fid && fv_min.vid == fv_max.vid) {
297 kprintf("cpu%d: only one P-State is supported\n", mycpuid);
298 if (acpi_pst_stringent_check)
302 for (i = 0; i < npstates; ++i) {
303 const struct acpi_pstate *p = &pstates[i];
304 uint32_t fid, vid, mvs, rvo;
307 fid = AMD0F_PST_CTL_FID(p->st_cval);
308 vid = AMD0F_PST_CTL_VID(p->st_cval);
/* First entry (P0): must equal the hardware maximum FID/VID */
311 if (vid != fv_max.vid) {
312 kprintf("cpu%d: max VID mismatch "
313 "real %u, lim %d\n", mycpuid,
316 if (fid != fv_max.fid) {
317 kprintf("cpu%d: max FID mismatch "
318 "real %u, lim %d\n", mycpuid,
/* Last entry: must equal the hardware minimum FID/VID */
321 } else if (i == npstates - 1) {
322 if (vid != fv_min.vid) {
323 kprintf("cpu%d: min VID mismatch "
324 "real %u, lim %d\n", mycpuid,
327 if (fid != fv_min.fid) {
328 kprintf("cpu%d: min FID mismatch "
329 "real %u, lim %d\n", mycpuid,
/* Intermediate entries: FID strictly inside (min+8, max) */
333 if (fid >= fv_max.fid || fid < (fv_min.fid + 0x8)) {
334 kprintf("cpu%d: Invalid FID %#x, "
335 "out [%#x, %#x]\n", mycpuid, fid,
336 fv_min.fid + 0x8, fv_max.fid);
337 if (acpi_pst_stringent_check)
/* Lower VID means higher voltage, so the valid range is [max.vid, min.vid] */
340 if (vid < fv_max.vid || vid > fv_min.vid) {
341 kprintf("cpu%d: Invalid VID %#x, "
342 "in [%#x, %#x]\n", mycpuid, vid,
343 fv_max.vid, fv_min.vid);
344 if (acpi_pst_stringent_check)
349 mvs = AMD0F_PST_CTL_MVS(p->st_cval);
350 rvo = AMD0F_PST_CTL_RVO(p->st_cval);
352 /* Only 0 is allowed, i.e. 25mV stepping */
354 kprintf("cpu%d: Invalid MVS %#x\n", mycpuid, mvs);
/* RVO (in mV — computation outside this view) must be a multiple of the MVS step */
359 mvs_mv = 25 * (1 << mvs);
361 if (rvo_mv % mvs_mv != 0) {
362 kprintf("cpu%d: Invalid MVS/RVO (%#x/%#x)\n",
/*
 * Family 0fh set: decode the _PSS control value into the target FID/VID
 * pair plus the transition timing parameters (RVO, MVS, VST, PLL lock
 * time, IRT) and hand the actual transition to amd0f_set_fidvid().
 */
371 acpi_pst_amd0f_set_pstate(const struct acpi_pst_res *ctrl __unused,
372 const struct acpi_pst_res *status __unused,
373 const struct acpi_pstate *pstate)
375 struct amd0f_fidvid fv;
376 struct amd0f_xsit xsit;
378 fv.fid = AMD0F_PST_CTL_FID(pstate->st_cval);
379 fv.vid = AMD0F_PST_CTL_VID(pstate->st_cval);
381 xsit.rvo = AMD0F_PST_CTL_RVO(pstate->st_cval);
382 xsit.mvs = AMD0F_PST_CTL_MVS(pstate->st_cval);
383 xsit.vst = AMD0F_PST_CTL_VST(pstate->st_cval);
384 xsit.pll_time = AMD0F_PST_CTL_PLLTIME(pstate->st_cval);
385 xsit.irt = AMD0F_PST_CTL_IRT(pstate->st_cval);
387 return amd0f_set_fidvid(&fv, &xsit);
/*
 * Family 0fh get: read the current FID/VID from hardware and return the
 * _PSS entry whose status value matches both fields; the error and
 * not-found returns are outside this view.
 */
390 static const struct acpi_pstate *
391 acpi_pst_amd0f_get_pstate(const struct acpi_pst_res *status __unused,
392 const struct acpi_pstate *pstates, int npstates)
394 struct amd0f_fidvid fv;
397 error = amd0f_get_fidvid(&fv);
401 for (i = 0; i < npstates; ++i) {
402 const struct acpi_pstate *p = &pstates[i];
404 if (fv.fid == AMD0F_PST_ST_FID(p->st_sval) &&
405 fv.vid == AMD0F_PST_ST_VID(p->st_sval))
/*
 * AMD per-CPU init: no hardware setup is required for either AMD
 * backend — presumably returns 0; body is outside this view.
 */
412 acpi_pst_amd_init(const struct acpi_pst_res *ctrl __unused,
413 const struct acpi_pst_res *status __unused)
/*
 * Intel probe: require the Enhanced SpeedStep CPUID feature bit and a
 * family 6 or family f CPU.
 */
418 static const struct acpi_pst_md *
419 acpi_pst_intel_probe(void)
423 if ((cpu_feature2 & CPUID2_EST) == 0)
426 family = cpu_id & 0xf00;
427 if (family != 0xf00 && family != 0x600)
429 return &acpi_pst_intel;
/*
 * Intel control/status register check: both registers must live in the
 * same address space.  Fixed hardware (MSR) registers must NOT carry an
 * allocated bus resource; system I/O registers must have one and must
 * pass GAS layout verification.  Any other space is rejected.
 */
433 acpi_pst_intel_check_csr(const struct acpi_pst_res *ctrl,
434 const struct acpi_pst_res *status)
438 if (ctrl->pr_gas.SpaceId != status->pr_gas.SpaceId) {
439 kprintf("cpu%d: P-State control(%d)/status(%d) registers have "
440 "different SpaceId", mycpuid,
441 ctrl->pr_gas.SpaceId, status->pr_gas.SpaceId);
445 switch (ctrl->pr_gas.SpaceId) {
446 case ACPI_ADR_SPACE_FIXED_HARDWARE:
447 if (ctrl->pr_res != NULL || status->pr_res != NULL) {
448 /* XXX should panic() */
449 kprintf("cpu%d: Allocated resource for fixed hardware "
450 "registers\n", mycpuid);
455 case ACPI_ADR_SPACE_SYSTEM_IO:
456 if (ctrl->pr_res == NULL) {
457 kprintf("cpu%d: ioport allocation failed for control "
458 "register\n", mycpuid);
461 error = acpi_pst_md_gas_verify(&ctrl->pr_gas);
463 kprintf("cpu%d: Invalid control register GAS\n",
468 if (status->pr_res == NULL) {
469 kprintf("cpu%d: ioport allocation failed for status "
470 "register\n", mycpuid);
473 error = acpi_pst_md_gas_verify(&status->pr_gas);
475 kprintf("cpu%d: Invalid status register GAS\n",
482 kprintf("cpu%d: Invalid P-State control/status register "
483 "SpaceId %d\n", mycpuid, ctrl->pr_gas.SpaceId);
/*
 * Intel P-state table check: nothing to validate beyond what the
 * generic layer already did — presumably returns 0; body is outside
 * this view.
 */
490 acpi_pst_intel_check_pstates(const struct acpi_pstate *pstates __unused,
491 int npstates __unused)
/*
 * Intel per-CPU init: skip CPUs where the EST enable bit is reserved in
 * MISC_ENABLE (family f, and certain family 6 models — the model test is
 * outside this view), otherwise set the EST enable bit and read it back
 * to confirm the write stuck.
 */
497 acpi_pst_intel_init(const struct acpi_pst_res *ctrl __unused,
498 const struct acpi_pst_res *status __unused)
500 uint32_t family, model;
501 uint64_t misc_enable;
503 family = cpu_id & 0xf00;
504 if (family == 0xf00) {
505 /* EST enable bit is reserved in INTEL_MSR_MISC_ENABLE */
508 KKASSERT(family == 0x600);
/* model = (extended model << 4) | model */
510 model = ((cpu_id & 0xf0000) >> 12) | ((cpu_id & 0xf0) >> 4);
512 /* EST enable bit is reserved in INTEL_MSR_MISC_ENABLE */
516 misc_enable = rdmsr(INTEL_MSR_MISC_ENABLE);
517 if ((misc_enable & INTEL_MSR_MISC_EST_EN) == 0) {
518 misc_enable |= INTEL_MSR_MISC_EST_EN;
519 wrmsr(INTEL_MSR_MISC_ENABLE, misc_enable);
/* Re-read: if the bit did not latch, EST cannot be enabled on this CPU */
521 misc_enable = rdmsr(INTEL_MSR_MISC_ENABLE);
522 if ((misc_enable & INTEL_MSR_MISC_EST_EN) == 0) {
523 kprintf("cpu%d: Can't enable EST\n", mycpuid);
/*
 * Intel set: when the control register is an allocated ioport resource,
 * write the _PSS control value through it; otherwise read-modify-write
 * the low 16 bits of the PERF_CTL MSR.
 */
531 acpi_pst_intel_set_pstate(const struct acpi_pst_res *ctrl,
532 const struct acpi_pst_res *status __unused,
533 const struct acpi_pstate *pstate)
535 if (ctrl->pr_res != NULL) {
536 acpi_pst_md_res_write(ctrl, pstate->st_cval);
540 ctl = rdmsr(INTEL_MSR_PERF_CTL);
541 ctl &= ~INTEL_MSR_PERF_MASK;
542 ctl |= (pstate->st_cval & INTEL_MSR_PERF_MASK);
543 wrmsr(INTEL_MSR_PERF_CTL, ctl);
/*
 * Intel get: read the current state either through the ioport status
 * register (exact _PSS status match) or from the low 16 bits of the
 * PERF_STATUS MSR (masked match); the not-found returns are outside
 * this view.
 */
548 static const struct acpi_pstate *
549 acpi_pst_intel_get_pstate(const struct acpi_pst_res *status,
550 const struct acpi_pstate *pstates, int npstates)
554 if (status->pr_res != NULL) {
557 st = acpi_pst_md_res_read(status);
558 for (i = 0; i < npstates; ++i) {
559 if (pstates[i].st_sval == st)
565 sval = rdmsr(INTEL_MSR_PERF_STATUS) & INTEL_MSR_PERF_MASK;
566 for (i = 0; i < npstates; ++i) {
567 if ((pstates[i].st_sval & INTEL_MSR_PERF_MASK) == sval)
/*
 * Return the access size (in bytes) for a GAS: the explicit AccessWidth
 * when given, otherwise derived from BitWidth.
 */
575 acpi_pst_md_gas_asz(const ACPI_GENERIC_ADDRESS *gas)
579 if (gas->AccessWidth != 0)
580 asz = gas->AccessWidth
582 asz = gas->BitWidth / NBBY;
/*
 * Verify that a GAS is byte-aligned and that its register extent is
 * consistent with the derived access size (the final consistency test
 * is outside this view).
 */
596 acpi_pst_md_gas_verify(const ACPI_GENERIC_ADDRESS *gas)
600 if (gas->BitOffset % NBBY != 0)
603 end = gas->BitWidth / NBBY;
604 reg = gas->BitOffset / NBBY;
609 asz = acpi_pst_md_gas_asz(gas);
/*
 * Read the P-state register through its allocated bus resource, using
 * the 1/2/4-byte bus_space accessor selected by the GAS access size;
 * any other width is a bug (panic).
 */
619 acpi_pst_md_res_read(const struct acpi_pst_res *res)
623 KKASSERT(res->pr_res != NULL);
624 asz = acpi_pst_md_gas_asz(&res->pr_gas);
625 reg = res->pr_gas.BitOffset / NBBY;
629 return bus_space_read_1(res->pr_bt, res->pr_bh, reg);
631 return bus_space_read_2(res->pr_bt, res->pr_bh, reg);
633 return bus_space_read_4(res->pr_bt, res->pr_bh, reg);
635 panic("unsupported access width %d\n", asz);
/*
 * Write 'val' to the P-state register through its allocated bus
 * resource, using the 1/2/4-byte bus_space accessor selected by the
 * GAS access size; any other width is a bug (panic).
 */
642 acpi_pst_md_res_write(const struct acpi_pst_res *res, uint32_t val)
646 KKASSERT(res->pr_res != NULL);
647 asz = acpi_pst_md_gas_asz(&res->pr_gas);
648 reg = res->pr_gas.BitOffset / NBBY;
652 bus_space_write_1(res->pr_bt, res->pr_bh, reg, val);
655 bus_space_write_2(res->pr_bt, res->pr_bh, reg, val);
658 bus_space_write_4(res->pr_bt, res->pr_bh, reg, val);
661 panic("unsupported access width %d\n", asz);