powerd: Wait hw.acpi.cpu.px_dom* sysctl to be created by kernel
[dragonfly.git] / sys / platform / pc64 / acpica5 / acpi_pstate_machdep.c
CommitLineData
e774ca6d
MD
1/*
2 * Copyright (c) 2009 The DragonFly Project. All rights reserved.
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Sepherosa Ziehau <sepherosa@gmail.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 */
34
35#include <sys/param.h>
36#include <sys/systm.h>
37#include <sys/globaldata.h>
38
39#include <machine/md_var.h>
40#include <machine/cpufunc.h>
41#include <machine/cpufreq.h>
42#include <machine/specialreg.h>
43
44#include "acpi.h"
45#include "acpi_cpu_pstate.h"
46
/* CPUID 0x80000007 EDX bit: hardware P-state control (AMD family 10h) */
#define AMD_APMI_HWPSTATE		0x80

/* AMD family 1xh P-state control/status MSRs */
#define AMD_MSR_PSTATE_CSR_MASK		0x7ULL
#define AMD1X_MSR_PSTATE_CTL		0xc0010062
#define AMD1X_MSR_PSTATE_ST		0xc0010063

/* "P-state enabled" bit in the per-P-state MSRs */
#define AMD_MSR_PSTATE_EN		0x8000000000000000ULL

/* AMD family 10h: per-P-state MSR block (P0-P4 only) */
#define AMD10_MSR_PSTATE_START		0xc0010064
#define AMD10_MSR_PSTATE_COUNT		5

/* Field extractors for AMD family 0fh _PSS control values */
#define AMD0F_PST_CTL_FID(cval)		(((cval) >> 0)  & 0x3f)
#define AMD0F_PST_CTL_VID(cval)		(((cval) >> 6)  & 0x1f)
#define AMD0F_PST_CTL_VST(cval)		(((cval) >> 11) & 0x7f)
#define AMD0F_PST_CTL_MVS(cval)		(((cval) >> 18) & 0x3)
#define AMD0F_PST_CTL_PLLTIME(cval)	(((cval) >> 20) & 0x7f)
#define AMD0F_PST_CTL_RVO(cval)		(((cval) >> 28) & 0x3)
#define AMD0F_PST_CTL_IRT(cval)		(((cval) >> 30) & 0x3)

/* Field extractors for AMD family 0fh _PSS status values */
#define AMD0F_PST_ST_FID(sval)		(((sval) >> 0) & 0x3f)
#define AMD0F_PST_ST_VID(sval)		(((sval) >> 6) & 0x3f)

/* Intel Enhanced SpeedStep MSRs */
#define INTEL_MSR_MISC_ENABLE		0x1a0
#define INTEL_MSR_MISC_EST_EN		0x10000ULL

#define INTEL_MSR_PERF_STATUS		0x198
#define INTEL_MSR_PERF_CTL		0x199
#define INTEL_MSR_PERF_MASK		0xffffULL
75
76static const struct acpi_pst_md *
77 acpi_pst_amd_probe(void);
78static int acpi_pst_amd_check_csr(const struct acpi_pst_res *,
79 const struct acpi_pst_res *);
80static int acpi_pst_amd1x_check_pstates(const struct acpi_pstate *, int,
81 uint32_t, uint32_t);
82static int acpi_pst_amd10_check_pstates(const struct acpi_pstate *, int);
83static int acpi_pst_amd0f_check_pstates(const struct acpi_pstate *, int);
84static int acpi_pst_amd_init(const struct acpi_pst_res *,
85 const struct acpi_pst_res *);
86static int acpi_pst_amd1x_set_pstate(const struct acpi_pst_res *,
87 const struct acpi_pst_res *, const struct acpi_pstate *);
88static int acpi_pst_amd0f_set_pstate(const struct acpi_pst_res *,
89 const struct acpi_pst_res *, const struct acpi_pstate *);
90static const struct acpi_pstate *
91 acpi_pst_amd1x_get_pstate(const struct acpi_pst_res *,
92 const struct acpi_pstate *, int);
93static const struct acpi_pstate *
94 acpi_pst_amd0f_get_pstate(const struct acpi_pst_res *,
95 const struct acpi_pstate *, int);
96
97static const struct acpi_pst_md *
98 acpi_pst_intel_probe(void);
99static int acpi_pst_intel_check_csr(const struct acpi_pst_res *,
100 const struct acpi_pst_res *);
101static int acpi_pst_intel_check_pstates(const struct acpi_pstate *, int);
102static int acpi_pst_intel_init(const struct acpi_pst_res *,
103 const struct acpi_pst_res *);
104static int acpi_pst_intel_set_pstate(const struct acpi_pst_res *,
105 const struct acpi_pst_res *, const struct acpi_pstate *);
106static const struct acpi_pstate *
107 acpi_pst_intel_get_pstate(const struct acpi_pst_res *,
108 const struct acpi_pstate *, int);
109
110static int acpi_pst_md_gas_asz(const ACPI_GENERIC_ADDRESS *);
111static int acpi_pst_md_gas_verify(const ACPI_GENERIC_ADDRESS *);
112static uint32_t acpi_pst_md_res_read(const struct acpi_pst_res *);
113static void acpi_pst_md_res_write(const struct acpi_pst_res *, uint32_t);
114
115static const struct acpi_pst_md acpi_pst_amd10 = {
116 .pmd_check_csr = acpi_pst_amd_check_csr,
117 .pmd_check_pstates = acpi_pst_amd10_check_pstates,
118 .pmd_init = acpi_pst_amd_init,
119 .pmd_set_pstate = acpi_pst_amd1x_set_pstate,
120 .pmd_get_pstate = acpi_pst_amd1x_get_pstate
121};
122
123static const struct acpi_pst_md acpi_pst_amd0f = {
124 .pmd_check_csr = acpi_pst_amd_check_csr,
125 .pmd_check_pstates = acpi_pst_amd0f_check_pstates,
126 .pmd_init = acpi_pst_amd_init,
127 .pmd_set_pstate = acpi_pst_amd0f_set_pstate,
128 .pmd_get_pstate = acpi_pst_amd0f_get_pstate
129};
130
131static const struct acpi_pst_md acpi_pst_intel = {
132 .pmd_check_csr = acpi_pst_intel_check_csr,
133 .pmd_check_pstates = acpi_pst_intel_check_pstates,
134 .pmd_init = acpi_pst_intel_init,
135 .pmd_set_pstate = acpi_pst_intel_set_pstate,
136 .pmd_get_pstate = acpi_pst_intel_get_pstate
137};
138
139const struct acpi_pst_md *
140acpi_pst_md_probe(void)
141{
142 if (strcmp(cpu_vendor, "AuthenticAMD") == 0)
143 return acpi_pst_amd_probe();
144 else if (strcmp(cpu_vendor, "GenuineIntel") == 0)
145 return acpi_pst_intel_probe();
146 return NULL;
147}
148
149static const struct acpi_pst_md *
150acpi_pst_amd_probe(void)
151{
152 uint32_t regs[4], ext_family;
153
154 if ((cpu_id & 0x00000f00) != 0x00000f00)
155 return NULL;
156
157 /* Check whether APMI exists */
158 do_cpuid(0x80000000, regs);
159 if (regs[0] < 0x80000007)
160 return NULL;
161
162 /* Fetch APMI */
163 do_cpuid(0x80000007, regs);
164
165 ext_family = cpu_id & 0x0ff00000;
166 switch (ext_family) {
167 case 0x00000000: /* Family 0fh */
168 if ((regs[3] & 0x06) == 0x06)
169 return &acpi_pst_amd0f;
170 break;
171
172 case 0x00100000: /* Family 10h */
173 if (regs[3] & 0x80)
174 return &acpi_pst_amd10;
175 break;
176
177 default:
178 break;
179 }
180 return NULL;
181}
182
183static int
184acpi_pst_amd_check_csr(const struct acpi_pst_res *ctrl,
185 const struct acpi_pst_res *status)
186{
187 if (ctrl->pr_gas.SpaceId != ACPI_ADR_SPACE_FIXED_HARDWARE) {
188 kprintf("cpu%d: Invalid P-State control register\n", mycpuid);
189 return EINVAL;
190 }
191 if (status->pr_gas.SpaceId != ACPI_ADR_SPACE_FIXED_HARDWARE) {
192 kprintf("cpu%d: Invalid P-State status register\n", mycpuid);
193 return EINVAL;
194 }
195 return 0;
196}
197
198static int
199acpi_pst_amd1x_check_pstates(const struct acpi_pstate *pstates, int npstates,
200 uint32_t msr_start, uint32_t msr_end)
201{
202 int i;
203
204 /*
205 * Make sure that related MSR P-State registers are enabled.
206 *
207 * NOTE:
208 * We don't check status register value here;
209 * it will not be used.
210 */
211 for (i = 0; i < npstates; ++i) {
212 uint64_t pstate;
213 uint32_t msr;
214
215 msr = msr_start +
216 (pstates[i].st_cval & AMD_MSR_PSTATE_CSR_MASK);
217 if (msr >= msr_end) {
218 kprintf("cpu%d: MSR P-State register %#08x "
219 "does not exist\n", mycpuid, msr);
220 return EINVAL;
221 }
222
223 pstate = rdmsr(msr);
224 if ((pstate & AMD_MSR_PSTATE_EN) == 0) {
225 kprintf("cpu%d: MSR P-State register %#08x "
226 "is not enabled\n", mycpuid, msr);
227 return EINVAL;
228 }
229 }
230 return 0;
231}
232
233static int
234acpi_pst_amd10_check_pstates(const struct acpi_pstate *pstates, int npstates)
235{
236 /* Only P0-P4 are supported */
237 if (npstates > AMD10_MSR_PSTATE_COUNT) {
238 kprintf("cpu%d: only P0-P4 is allowed\n", mycpuid);
239 return EINVAL;
240 }
241
242 return acpi_pst_amd1x_check_pstates(pstates, npstates,
243 AMD10_MSR_PSTATE_START,
244 AMD10_MSR_PSTATE_START + AMD10_MSR_PSTATE_COUNT);
245}
246
247static int
248acpi_pst_amd1x_set_pstate(const struct acpi_pst_res *ctrl __unused,
249 const struct acpi_pst_res *status __unused,
250 const struct acpi_pstate *pstate)
251{
252 uint64_t cval;
253
254 cval = pstate->st_cval & AMD_MSR_PSTATE_CSR_MASK;
255 wrmsr(AMD1X_MSR_PSTATE_CTL, cval);
256
257 /*
258 * Don't check AMD1X_MSR_PSTATE_ST here, since it is
259 * affected by various P-State limits.
260 *
261 * For details:
262 * AMD Family 10h Processor BKDG Rev 3.20 (#31116)
263 * 2.4.2.4 P-state Transition Behavior
264 */
265
266 return 0;
267}
268
269static const struct acpi_pstate *
270acpi_pst_amd1x_get_pstate(const struct acpi_pst_res *status __unused,
271 const struct acpi_pstate *pstates, int npstates)
272{
273 uint64_t sval;
274 int i;
275
276 sval = rdmsr(AMD1X_MSR_PSTATE_ST) & AMD_MSR_PSTATE_CSR_MASK;
277 for (i = 0; i < npstates; ++i) {
278 if ((pstates[i].st_sval & AMD_MSR_PSTATE_CSR_MASK) == sval)
279 return &pstates[i];
280 }
281 return NULL;
282}
283
284static int
285acpi_pst_amd0f_check_pstates(const struct acpi_pstate *pstates, int npstates)
286{
287 struct amd0f_fidvid fv_max, fv_min;
288 int i;
289
290 amd0f_fidvid_limit(&fv_min, &fv_max);
291
ed468a9a
SZ
292 if (fv_min.fid == fv_max.fid && fv_min.vid == fv_max.vid) {
293 kprintf("cpu%d: only one P-State is supported\n", mycpuid);
294 return EOPNOTSUPP;
295 }
296
e774ca6d
MD
297 for (i = 0; i < npstates; ++i) {
298 const struct acpi_pstate *p = &pstates[i];
299 uint32_t fid, vid, mvs, rvo;
300 int mvs_mv, rvo_mv;
301
302 fid = AMD0F_PST_CTL_FID(p->st_cval);
303 vid = AMD0F_PST_CTL_VID(p->st_cval);
304
ed468a9a
SZ
305 if (i == 0) {
306 if (vid != fv_max.vid) {
307 kprintf("cpu%d: max VID mismatch "
308 "real %u, lim %d\n", mycpuid,
309 vid, fv_max.vid);
310 }
311 if (fid != fv_max.fid) {
312 kprintf("cpu%d: max FID mismatch "
313 "real %u, lim %d\n", mycpuid,
314 fid, fv_max.fid);
315 }
316 } else if (i == npstates - 1) {
317 if (vid != fv_min.vid) {
318 kprintf("cpu%d: min VID mismatch "
319 "real %u, lim %d\n", mycpuid,
320 vid, fv_min.vid);
321 }
322 if (fid != fv_min.fid) {
323 kprintf("cpu%d: min FID mismatch "
324 "real %u, lim %d\n", mycpuid,
325 fid, fv_min.fid);
326 }
327 } else {
328 if (fid >= fv_max.fid || fid < (fv_min.fid + 0x8)) {
329 kprintf("cpu%d: Invalid FID %#x, "
330 "out [%#x, %#x]\n", mycpuid, fid,
331 fv_min.fid + 0x8, fv_max.fid);
332 return EINVAL;
333 }
334 if (vid < fv_max.vid || vid > fv_min.vid) {
335 kprintf("cpu%d: Invalid VID %#x, "
336 "in [%#x, %#x]\n", mycpuid, vid,
337 fv_max.vid, fv_min.vid);
338 return EINVAL;
339 }
e774ca6d
MD
340 }
341
342 mvs = AMD0F_PST_CTL_MVS(p->st_cval);
343 rvo = AMD0F_PST_CTL_RVO(p->st_cval);
344
345 /* Only 0 is allowed, i.e. 25mV stepping */
346 if (mvs != 0) {
347 kprintf("cpu%d: Invalid MVS %#x\n", mycpuid, mvs);
348 return EINVAL;
349 }
350
351 /* -> mV */
352 mvs_mv = 25 * (1 << mvs);
353 rvo_mv = 25 * rvo;
354 if (rvo_mv % mvs_mv != 0) {
355 kprintf("cpu%d: Invalid MVS/RVO (%#x/%#x)\n",
356 mycpuid, mvs, rvo);
357 return EINVAL;
358 }
359 }
360 return 0;
361}
362
363static int
364acpi_pst_amd0f_set_pstate(const struct acpi_pst_res *ctrl __unused,
365 const struct acpi_pst_res *status __unused,
366 const struct acpi_pstate *pstate)
367{
368 struct amd0f_fidvid fv;
369 struct amd0f_xsit xsit;
370
371 fv.fid = AMD0F_PST_CTL_FID(pstate->st_cval);
372 fv.vid = AMD0F_PST_CTL_VID(pstate->st_cval);
373
374 xsit.rvo = AMD0F_PST_CTL_RVO(pstate->st_cval);
375 xsit.mvs = AMD0F_PST_CTL_MVS(pstate->st_cval);
376 xsit.vst = AMD0F_PST_CTL_VST(pstate->st_cval);
377 xsit.pll_time = AMD0F_PST_CTL_PLLTIME(pstate->st_cval);
378 xsit.irt = AMD0F_PST_CTL_IRT(pstate->st_cval);
379
380 return amd0f_set_fidvid(&fv, &xsit);
381}
382
383static const struct acpi_pstate *
384acpi_pst_amd0f_get_pstate(const struct acpi_pst_res *status __unused,
385 const struct acpi_pstate *pstates, int npstates)
386{
387 struct amd0f_fidvid fv;
388 int error, i;
389
390 error = amd0f_get_fidvid(&fv);
391 if (error)
392 return NULL;
393
394 for (i = 0; i < npstates; ++i) {
395 const struct acpi_pstate *p = &pstates[i];
396
397 if (fv.fid == AMD0F_PST_ST_FID(p->st_sval) &&
398 fv.vid == AMD0F_PST_ST_VID(p->st_sval))
399 return p;
400 }
401 return NULL;
402}
403
404static int
405acpi_pst_amd_init(const struct acpi_pst_res *ctrl __unused,
406 const struct acpi_pst_res *status __unused)
407{
408 return 0;
409}
410
411static const struct acpi_pst_md *
412acpi_pst_intel_probe(void)
413{
414 uint32_t family;
415
416 if ((cpu_feature2 & CPUID2_EST) == 0)
417 return NULL;
418
419 family = cpu_id & 0xf00;
420 if (family != 0xf00 && family != 0x600)
421 return NULL;
422 return &acpi_pst_intel;
423}
424
425static int
426acpi_pst_intel_check_csr(const struct acpi_pst_res *ctrl,
427 const struct acpi_pst_res *status)
428{
429 int error;
430
431 if (ctrl->pr_gas.SpaceId != status->pr_gas.SpaceId) {
432 kprintf("cpu%d: P-State control(%d)/status(%d) registers have "
433 "different SpaceId", mycpuid,
434 ctrl->pr_gas.SpaceId, status->pr_gas.SpaceId);
435 return EINVAL;
436 }
437
438 switch (ctrl->pr_gas.SpaceId) {
439 case ACPI_ADR_SPACE_FIXED_HARDWARE:
440 if (ctrl->pr_res != NULL || status->pr_res != NULL) {
441 /* XXX should panic() */
442 kprintf("cpu%d: Allocated resource for fixed hardware "
443 "registers\n", mycpuid);
444 return EINVAL;
445 }
446 break;
447
448 case ACPI_ADR_SPACE_SYSTEM_IO:
449 if (ctrl->pr_res == NULL) {
450 kprintf("cpu%d: ioport allocation failed for control "
451 "register\n", mycpuid);
452 return ENXIO;
453 }
454 error = acpi_pst_md_gas_verify(&ctrl->pr_gas);
455 if (error) {
456 kprintf("cpu%d: Invalid control register GAS\n",
457 mycpuid);
458 return error;
459 }
460
461 if (status->pr_res == NULL) {
462 kprintf("cpu%d: ioport allocation failed for status "
463 "register\n", mycpuid);
464 return ENXIO;
465 }
466 error = acpi_pst_md_gas_verify(&status->pr_gas);
467 if (error) {
468 kprintf("cpu%d: Invalid status register GAS\n",
469 mycpuid);
470 return error;
471 }
472 break;
473
474 default:
475 kprintf("cpu%d: Invalid P-State control/status register "
476 "SpaceId %d\n", mycpuid, ctrl->pr_gas.SpaceId);
477 return EOPNOTSUPP;
478 }
479 return 0;
480}
481
482static int
483acpi_pst_intel_check_pstates(const struct acpi_pstate *pstates __unused,
484 int npstates __unused)
485{
486 return 0;
487}
488
489static int
490acpi_pst_intel_init(const struct acpi_pst_res *ctrl __unused,
491 const struct acpi_pst_res *status __unused)
492{
493 uint32_t family, model;
494 uint64_t misc_enable;
495
496 family = cpu_id & 0xf00;
497 if (family == 0xf00) {
498 /* EST enable bit is reserved in INTEL_MSR_MISC_ENABLE */
499 return 0;
500 }
501 KKASSERT(family == 0x600);
502
503 model = ((cpu_id & 0xf0000) >> 12) | ((cpu_id & 0xf0) >> 4);
504 if (model < 0xd) {
505 /* EST enable bit is reserved in INTEL_MSR_MISC_ENABLE */
506 return 0;
507 }
508
509 misc_enable = rdmsr(INTEL_MSR_MISC_ENABLE);
510 if ((misc_enable & INTEL_MSR_MISC_EST_EN) == 0) {
511 misc_enable |= INTEL_MSR_MISC_EST_EN;
512 wrmsr(INTEL_MSR_MISC_ENABLE, misc_enable);
513
514 misc_enable = rdmsr(INTEL_MSR_MISC_ENABLE);
515 if ((misc_enable & INTEL_MSR_MISC_EST_EN) == 0) {
516 kprintf("cpu%d: Can't enable EST\n", mycpuid);
517 return EIO;
518 }
519 }
520 return 0;
521}
522
523static int
524acpi_pst_intel_set_pstate(const struct acpi_pst_res *ctrl,
525 const struct acpi_pst_res *status __unused,
526 const struct acpi_pstate *pstate)
527{
528 if (ctrl->pr_res != NULL) {
529 acpi_pst_md_res_write(ctrl, pstate->st_cval);
530 } else {
531 uint64_t ctl;
532
533 ctl = rdmsr(INTEL_MSR_PERF_CTL);
534 ctl &= ~INTEL_MSR_PERF_MASK;
535 ctl |= (pstate->st_cval & INTEL_MSR_PERF_MASK);
536 wrmsr(INTEL_MSR_PERF_CTL, ctl);
537 }
538 return 0;
539}
540
541static const struct acpi_pstate *
542acpi_pst_intel_get_pstate(const struct acpi_pst_res *status,
543 const struct acpi_pstate *pstates, int npstates)
544{
545 int i;
546
547 if (status->pr_res != NULL) {
548 uint32_t st;
549
550 st = acpi_pst_md_res_read(status);
551 for (i = 0; i < npstates; ++i) {
552 if (pstates[i].st_sval == st)
553 return &pstates[i];
554 }
555 } else {
556 uint64_t sval;
557
558 sval = rdmsr(INTEL_MSR_PERF_STATUS) & INTEL_MSR_PERF_MASK;
559 for (i = 0; i < npstates; ++i) {
560 if ((pstates[i].st_sval & INTEL_MSR_PERF_MASK) == sval)
561 return &pstates[i];
562 }
563 }
564 return NULL;
565}
566
567static int
568acpi_pst_md_gas_asz(const ACPI_GENERIC_ADDRESS *gas)
569{
570 int asz;
571
572 if (gas->AccessWidth != 0)
573 asz = gas->AccessWidth;
574 else
575 asz = gas->BitWidth / NBBY;
576 switch (asz) {
577 case 1:
578 case 2:
579 case 4:
580 break;
581 default:
582 asz = 0;
583 break;
584 }
585 return asz;
586}
587
588static int
589acpi_pst_md_gas_verify(const ACPI_GENERIC_ADDRESS *gas)
590{
591 int reg, end, asz;
592
593 if (gas->BitOffset % NBBY != 0)
594 return EINVAL;
595
596 end = gas->BitWidth / NBBY;
597 reg = gas->BitOffset / NBBY;
598
599 if (reg >= end)
600 return EINVAL;
601
602 asz = acpi_pst_md_gas_asz(gas);
603 if (asz == 0)
604 return EINVAL;
605
606 if (reg + asz > end)
607 return EINVAL;
608 return 0;
609}
610
611static uint32_t
612acpi_pst_md_res_read(const struct acpi_pst_res *res)
613{
614 int asz, reg;
615
616 KKASSERT(res->pr_res != NULL);
617 asz = acpi_pst_md_gas_asz(&res->pr_gas);
618 reg = res->pr_gas.BitOffset / NBBY;
619
620 switch (asz) {
621 case 1:
622 return bus_space_read_1(res->pr_bt, res->pr_bh, reg);
623 case 2:
624 return bus_space_read_2(res->pr_bt, res->pr_bh, reg);
625 case 4:
626 return bus_space_read_4(res->pr_bt, res->pr_bh, reg);
627 }
628 panic("unsupported access width %d\n", asz);
629
630 /* NEVER REACHED */
631 return 0;
632}
633
634static void
635acpi_pst_md_res_write(const struct acpi_pst_res *res, uint32_t val)
636{
637 int asz, reg;
638
639 KKASSERT(res->pr_res != NULL);
640 asz = acpi_pst_md_gas_asz(&res->pr_gas);
641 reg = res->pr_gas.BitOffset / NBBY;
642
643 switch (asz) {
644 case 1:
645 bus_space_write_1(res->pr_bt, res->pr_bh, reg, val);
646 break;
647 case 2:
648 bus_space_write_2(res->pr_bt, res->pr_bh, reg, val);
649 break;
650 case 4:
651 bus_space_write_4(res->pr_bt, res->pr_bh, reg, val);
652 break;
653 default:
654 panic("unsupported access width %d\n", asz);
655 }
656}