Add tap(4) to LINT/LINT64.
[dragonfly.git] / sys / platform / pc64 / acpica5 / acpi_pstate_machdep.c
... / ...
CommitLineData
1/*
2 * Copyright (c) 2009 The DragonFly Project. All rights reserved.
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Sepherosa Ziehau <sepherosa@gmail.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 */
34
35#include <sys/param.h>
36#include <sys/kernel.h>
37#include <sys/systm.h>
38#include <sys/globaldata.h>
39
40#include <machine/md_var.h>
41#include <machine/cpufunc.h>
42#include <machine/cpufreq.h>
43#include <machine/cputypes.h>
44#include <machine/specialreg.h>
45
46#include "acpi.h"
47#include "acpi_cpu_pstate.h"
48
/* CPUID 0x80000007 EDX bit: AMD hardware P-state control supported */
#define AMD_APMI_HWPSTATE 0x80

/* Low bits of _PSS control/status values select the MSR P-state index */
#define AMD_MSR_PSTATE_CSR_MASK 0x7ULL
/* AMD family 10h (and later) P-state control/status MSRs */
#define AMD1X_MSR_PSTATE_CTL 0xc0010062
#define AMD1X_MSR_PSTATE_ST 0xc0010063

/* "P-state enabled" bit in the per-P-state definition MSRs */
#define AMD_MSR_PSTATE_EN 0x8000000000000000ULL

/* AMD family 10h per-P-state MSR block: P0-P4 */
#define AMD10_MSR_PSTATE_START 0xc0010064
#define AMD10_MSR_PSTATE_COUNT 5

/*
 * AMD family 0fh: fields extracted from a _PSS control value.
 * FID/VID select frequency/voltage; VST, MVS, PLL time, RVO and IRT
 * are transition timing/stepping parameters.
 */
#define AMD0F_PST_CTL_FID(cval) (((cval) >> 0) & 0x3f)
#define AMD0F_PST_CTL_VID(cval) (((cval) >> 6) & 0x1f)
#define AMD0F_PST_CTL_VST(cval) (((cval) >> 11) & 0x7f)
#define AMD0F_PST_CTL_MVS(cval) (((cval) >> 18) & 0x3)
#define AMD0F_PST_CTL_PLLTIME(cval) (((cval) >> 20) & 0x7f)
#define AMD0F_PST_CTL_RVO(cval) (((cval) >> 28) & 0x3)
#define AMD0F_PST_CTL_IRT(cval) (((cval) >> 30) & 0x3)

/* AMD family 0fh: fields extracted from a _PSS status value */
#define AMD0F_PST_ST_FID(sval) (((sval) >> 0) & 0x3f)
#define AMD0F_PST_ST_VID(sval) (((sval) >> 6) & 0x3f)

/* Intel IA32_MISC_ENABLE MSR and its Enhanced SpeedStep enable bit */
#define INTEL_MSR_MISC_ENABLE 0x1a0
#define INTEL_MSR_MISC_EST_EN 0x10000ULL

/* Intel IA32_PERF_STATUS/IA32_PERF_CTL MSRs; low 16 bits carry the state */
#define INTEL_MSR_PERF_STATUS 0x198
#define INTEL_MSR_PERF_CTL 0x199
#define INTEL_MSR_PERF_MASK 0xffffULL
/* AMD (family 0fh / family 1xh) backend */
static const struct acpi_pst_md *
		acpi_pst_amd_probe(void);
static int	acpi_pst_amd_check_csr(const struct acpi_pst_res *,
		    const struct acpi_pst_res *);
static int	acpi_pst_amd1x_check_pstates(const struct acpi_pstate *, int,
		    uint32_t, uint32_t);
static int	acpi_pst_amd10_check_pstates(const struct acpi_pstate *, int);
static int	acpi_pst_amd0f_check_pstates(const struct acpi_pstate *, int);
static int	acpi_pst_amd_init(const struct acpi_pst_res *,
		    const struct acpi_pst_res *);
static int	acpi_pst_amd1x_set_pstate(const struct acpi_pst_res *,
		    const struct acpi_pst_res *, const struct acpi_pstate *);
static int	acpi_pst_amd0f_set_pstate(const struct acpi_pst_res *,
		    const struct acpi_pst_res *, const struct acpi_pstate *);
static const struct acpi_pstate *
		acpi_pst_amd1x_get_pstate(const struct acpi_pst_res *,
		    const struct acpi_pstate *, int);
static const struct acpi_pstate *
		acpi_pst_amd0f_get_pstate(const struct acpi_pst_res *,
		    const struct acpi_pstate *, int);

/* Intel Enhanced SpeedStep (EST) backend */
static const struct acpi_pst_md *
		acpi_pst_intel_probe(void);
static int	acpi_pst_intel_check_csr(const struct acpi_pst_res *,
		    const struct acpi_pst_res *);
static int	acpi_pst_intel_check_pstates(const struct acpi_pstate *, int);
static int	acpi_pst_intel_init(const struct acpi_pst_res *,
		    const struct acpi_pst_res *);
static int	acpi_pst_intel_set_pstate(const struct acpi_pst_res *,
		    const struct acpi_pst_res *, const struct acpi_pstate *);
static const struct acpi_pstate *
		acpi_pst_intel_get_pstate(const struct acpi_pst_res *,
		    const struct acpi_pstate *, int);

/* Generic Address Structure (GAS) / bus-space access helpers */
static int	acpi_pst_md_gas_asz(const ACPI_GENERIC_ADDRESS *);
static int	acpi_pst_md_gas_verify(const ACPI_GENERIC_ADDRESS *);
static uint32_t	acpi_pst_md_res_read(const struct acpi_pst_res *);
static void	acpi_pst_md_res_write(const struct acpi_pst_res *, uint32_t);
116
/* Method table for AMD family 10h: MSR-based P-state control */
static const struct acpi_pst_md acpi_pst_amd10 = {
	.pmd_check_csr = acpi_pst_amd_check_csr,
	.pmd_check_pstates = acpi_pst_amd10_check_pstates,
	.pmd_init = acpi_pst_amd_init,
	.pmd_set_pstate = acpi_pst_amd1x_set_pstate,
	.pmd_get_pstate = acpi_pst_amd1x_get_pstate
};
124
/* Method table for AMD family 0fh: FID/VID-based P-state control */
static const struct acpi_pst_md acpi_pst_amd0f = {
	.pmd_check_csr = acpi_pst_amd_check_csr,
	.pmd_check_pstates = acpi_pst_amd0f_check_pstates,
	.pmd_init = acpi_pst_amd_init,
	.pmd_set_pstate = acpi_pst_amd0f_set_pstate,
	.pmd_get_pstate = acpi_pst_amd0f_get_pstate
};
132
/* Method table for Intel Enhanced SpeedStep (MSR or I/O mapped) */
static const struct acpi_pst_md acpi_pst_intel = {
	.pmd_check_csr = acpi_pst_intel_check_csr,
	.pmd_check_pstates = acpi_pst_intel_check_pstates,
	.pmd_init = acpi_pst_intel_init,
	.pmd_set_pstate = acpi_pst_intel_set_pstate,
	.pmd_get_pstate = acpi_pst_intel_get_pstate
};
140
141static int acpi_pst_stringent_check = 1;
142TUNABLE_INT("hw.acpi.cpu.pstate.strigent_check", &acpi_pst_stringent_check);
143
144const struct acpi_pst_md *
145acpi_pst_md_probe(void)
146{
147 if (cpu_vendor_id == CPU_VENDOR_AMD)
148 return acpi_pst_amd_probe();
149 else if (cpu_vendor_id == CPU_VENDOR_INTEL)
150 return acpi_pst_intel_probe();
151 return NULL;
152}
153
154static const struct acpi_pst_md *
155acpi_pst_amd_probe(void)
156{
157 uint32_t regs[4], ext_family;
158
159 if ((cpu_id & 0x00000f00) != 0x00000f00)
160 return NULL;
161
162 /* Check whether APMI exists */
163 do_cpuid(0x80000000, regs);
164 if (regs[0] < 0x80000007)
165 return NULL;
166
167 /* Fetch APMI */
168 do_cpuid(0x80000007, regs);
169
170 ext_family = cpu_id & 0x0ff00000;
171 switch (ext_family) {
172 case 0x00000000: /* Family 0fh */
173 if ((regs[3] & 0x06) == 0x06)
174 return &acpi_pst_amd0f;
175 break;
176
177 case 0x00100000: /* Family 10h */
178 if (regs[3] & 0x80)
179 return &acpi_pst_amd10;
180 break;
181
182 default:
183 break;
184 }
185 return NULL;
186}
187
/*
 * Verify the _PCT control/status registers for AMD CPUs.
 *
 * AMD P-state transitions are performed through MSRs, so both registers
 * must be in the "functional fixed hardware" address space; anything
 * else (e.g. system I/O) is rejected with EINVAL.
 */
static int
acpi_pst_amd_check_csr(const struct acpi_pst_res *ctrl,
    const struct acpi_pst_res *status)
{
	if (ctrl->pr_gas.SpaceId != ACPI_ADR_SPACE_FIXED_HARDWARE) {
		kprintf("cpu%d: Invalid P-State control register\n", mycpuid);
		return EINVAL;
	}
	if (status->pr_gas.SpaceId != ACPI_ADR_SPACE_FIXED_HARDWARE) {
		kprintf("cpu%d: Invalid P-State status register\n", mycpuid);
		return EINVAL;
	}
	return 0;
}
202
203static int
204acpi_pst_amd1x_check_pstates(const struct acpi_pstate *pstates, int npstates,
205 uint32_t msr_start, uint32_t msr_end)
206{
207 int i;
208
209 /*
210 * Make sure that related MSR P-State registers are enabled.
211 *
212 * NOTE:
213 * We don't check status register value here;
214 * it will not be used.
215 */
216 for (i = 0; i < npstates; ++i) {
217 uint64_t pstate;
218 uint32_t msr;
219
220 msr = msr_start +
221 (pstates[i].st_cval & AMD_MSR_PSTATE_CSR_MASK);
222 if (msr >= msr_end) {
223 kprintf("cpu%d: MSR P-State register %#08x "
224 "does not exist\n", mycpuid, msr);
225 return EINVAL;
226 }
227
228 pstate = rdmsr(msr);
229 if ((pstate & AMD_MSR_PSTATE_EN) == 0) {
230 kprintf("cpu%d: MSR P-State register %#08x "
231 "is not enabled\n", mycpuid, msr);
232 return EINVAL;
233 }
234 }
235 return 0;
236}
237
/*
 * Verify the ACPI P-states for AMD family 10h: cap the count at the
 * five architectural P-state MSRs (P0-P4), then run the generic family
 * 1xh MSR existence/enable checks.
 */
static int
acpi_pst_amd10_check_pstates(const struct acpi_pstate *pstates, int npstates)
{
	/* Only P0-P4 are supported */
	if (npstates > AMD10_MSR_PSTATE_COUNT) {
		kprintf("cpu%d: only P0-P4 is allowed\n", mycpuid);
		return EINVAL;
	}

	return acpi_pst_amd1x_check_pstates(pstates, npstates,
	    AMD10_MSR_PSTATE_START,
	    AMD10_MSR_PSTATE_START + AMD10_MSR_PSTATE_COUNT);
}
251
/*
 * Switch to the given P-state on AMD family 1xh by writing its index
 * into the P-state control MSR.  Always succeeds.
 */
static int
acpi_pst_amd1x_set_pstate(const struct acpi_pst_res *ctrl __unused,
    const struct acpi_pst_res *status __unused,
    const struct acpi_pstate *pstate)
{
	uint64_t cval;

	/* Only the P-state index bits of the control value are written */
	cval = pstate->st_cval & AMD_MSR_PSTATE_CSR_MASK;
	wrmsr(AMD1X_MSR_PSTATE_CTL, cval);

	/*
	 * Don't check AMD1X_MSR_PSTATE_ST here, since it is
	 * affected by various P-State limits.
	 *
	 * For details:
	 * AMD Family 10h Processor BKDG Rev 3.20 (#31116)
	 * 2.4.2.4 P-state Transition Behavior
	 */

	return 0;
}
273
274static const struct acpi_pstate *
275acpi_pst_amd1x_get_pstate(const struct acpi_pst_res *status __unused,
276 const struct acpi_pstate *pstates, int npstates)
277{
278 uint64_t sval;
279 int i;
280
281 sval = rdmsr(AMD1X_MSR_PSTATE_ST) & AMD_MSR_PSTATE_CSR_MASK;
282 for (i = 0; i < npstates; ++i) {
283 if ((pstates[i].st_sval & AMD_MSR_PSTATE_CSR_MASK) == sval)
284 return &pstates[i];
285 }
286 return NULL;
287}
288
/*
 * Verify the ACPI P-states for AMD family 0fh against the FID/VID
 * limits reported by the hardware (amd0f_fidvid_limit()).
 *
 * Checks performed:
 * - The CPU must support more than one FID/VID pair (unless stringent
 *   checking is disabled, in which case this is only warned about).
 * - The first entry (P0) should match the maximum FID/VID limit and
 *   the last entry the minimum limit; mismatches are logged only.
 * - Intermediate entries must have FID/VID within the hardware limits
 *   (errors under stringent checking, warnings otherwise).
 * - MVS must be 0 (25mV steps) and RVO must be a multiple of the MVS
 *   step; violations are always errors.
 *
 * Returns 0 when acceptable, EINVAL/EOPNOTSUPP otherwise.
 */
static int
acpi_pst_amd0f_check_pstates(const struct acpi_pstate *pstates, int npstates)
{
	struct amd0f_fidvid fv_max, fv_min;
	int i;

	amd0f_fidvid_limit(&fv_min, &fv_max);

	/* A single FID/VID pair means no usable P-state transitions */
	if (fv_min.fid == fv_max.fid && fv_min.vid == fv_max.vid) {
		kprintf("cpu%d: only one P-State is supported\n", mycpuid);
		if (acpi_pst_stringent_check)
			return EOPNOTSUPP;
	}

	for (i = 0; i < npstates; ++i) {
		const struct acpi_pstate *p = &pstates[i];
		uint32_t fid, vid, mvs, rvo;
		int mvs_mv, rvo_mv;

		fid = AMD0F_PST_CTL_FID(p->st_cval);
		vid = AMD0F_PST_CTL_VID(p->st_cval);

		if (i == 0) {
			/* P0 should sit exactly at the maximum limit */
			if (vid != fv_max.vid) {
				kprintf("cpu%d: max VID mismatch "
					"real %u, lim %d\n", mycpuid,
					vid, fv_max.vid);
			}
			if (fid != fv_max.fid) {
				kprintf("cpu%d: max FID mismatch "
					"real %u, lim %d\n", mycpuid,
					fid, fv_max.fid);
			}
		} else if (i == npstates - 1) {
			/* The lowest P-state should sit at the minimum limit */
			if (vid != fv_min.vid) {
				kprintf("cpu%d: min VID mismatch "
					"real %u, lim %d\n", mycpuid,
					vid, fv_min.vid);
			}
			if (fid != fv_min.fid) {
				kprintf("cpu%d: min FID mismatch "
					"real %u, lim %d\n", mycpuid,
					fid, fv_min.fid);
			}
		} else {
			/*
			 * Intermediate states: FID must lie strictly inside
			 * the limits (with 0x8 headroom above the minimum).
			 */
			if (fid >= fv_max.fid || fid < (fv_min.fid + 0x8)) {
				kprintf("cpu%d: Invalid FID %#x, "
					"out [%#x, %#x]\n", mycpuid, fid,
					fv_min.fid + 0x8, fv_max.fid);
				if (acpi_pst_stringent_check)
					return EINVAL;
			}
			/*
			 * Larger VID encodes lower voltage, so the valid VID
			 * range is [fv_max.vid, fv_min.vid].
			 * NOTE(review): the message says "in [...]" but the
			 * printed range is the VALID one the VID fell outside
			 * of — the wording looks inverted; confirm intent.
			 */
			if (vid < fv_max.vid || vid > fv_min.vid) {
				kprintf("cpu%d: Invalid VID %#x, "
					"in [%#x, %#x]\n", mycpuid, vid,
					fv_max.vid, fv_min.vid);
				if (acpi_pst_stringent_check)
					return EINVAL;
			}
		}

		mvs = AMD0F_PST_CTL_MVS(p->st_cval);
		rvo = AMD0F_PST_CTL_RVO(p->st_cval);

		/* Only 0 is allowed, i.e. 25mV stepping */
		if (mvs != 0) {
			kprintf("cpu%d: Invalid MVS %#x\n", mycpuid, mvs);
			return EINVAL;
		}

		/* -> mV; ramp voltage offset must divide evenly into steps */
		mvs_mv = 25 * (1 << mvs);
		rvo_mv = 25 * rvo;
		if (rvo_mv % mvs_mv != 0) {
			kprintf("cpu%d: Invalid MVS/RVO (%#x/%#x)\n",
				mycpuid, mvs, rvo);
			return EINVAL;
		}
	}
	return 0;
}
370
/*
 * Switch to the given P-state on AMD family 0fh: unpack the _PSS
 * control value into a FID/VID pair plus transition timing parameters
 * and hand them to amd0f_set_fidvid(), whose error code is returned.
 */
static int
acpi_pst_amd0f_set_pstate(const struct acpi_pst_res *ctrl __unused,
    const struct acpi_pst_res *status __unused,
    const struct acpi_pstate *pstate)
{
	struct amd0f_fidvid fv;
	struct amd0f_xsit xsit;

	/* Target frequency (FID) and voltage (VID) */
	fv.fid = AMD0F_PST_CTL_FID(pstate->st_cval);
	fv.vid = AMD0F_PST_CTL_VID(pstate->st_cval);

	/* Transition parameters: ramp offset, step size, timing */
	xsit.rvo = AMD0F_PST_CTL_RVO(pstate->st_cval);
	xsit.mvs = AMD0F_PST_CTL_MVS(pstate->st_cval);
	xsit.vst = AMD0F_PST_CTL_VST(pstate->st_cval);
	xsit.pll_time = AMD0F_PST_CTL_PLLTIME(pstate->st_cval);
	xsit.irt = AMD0F_PST_CTL_IRT(pstate->st_cval);

	return amd0f_set_fidvid(&fv, &xsit);
}
390
391static const struct acpi_pstate *
392acpi_pst_amd0f_get_pstate(const struct acpi_pst_res *status __unused,
393 const struct acpi_pstate *pstates, int npstates)
394{
395 struct amd0f_fidvid fv;
396 int error, i;
397
398 error = amd0f_get_fidvid(&fv);
399 if (error)
400 return NULL;
401
402 for (i = 0; i < npstates; ++i) {
403 const struct acpi_pstate *p = &pstates[i];
404
405 if (fv.fid == AMD0F_PST_ST_FID(p->st_sval) &&
406 fv.vid == AMD0F_PST_ST_VID(p->st_sval))
407 return p;
408 }
409 return NULL;
410}
411
/*
 * Per-CPU initialization for the AMD backends: nothing to do, AMD
 * P-state MSRs need no enabling step.
 */
static int
acpi_pst_amd_init(const struct acpi_pst_res *ctrl __unused,
    const struct acpi_pst_res *status __unused)
{
	return 0;
}
418
419static const struct acpi_pst_md *
420acpi_pst_intel_probe(void)
421{
422 uint32_t family;
423
424 if ((cpu_feature2 & CPUID2_EST) == 0)
425 return NULL;
426
427 family = cpu_id & 0xf00;
428 if (family != 0xf00 && family != 0x600)
429 return NULL;
430 return &acpi_pst_intel;
431}
432
433static int
434acpi_pst_intel_check_csr(const struct acpi_pst_res *ctrl,
435 const struct acpi_pst_res *status)
436{
437 int error;
438
439 if (ctrl->pr_gas.SpaceId != status->pr_gas.SpaceId) {
440 kprintf("cpu%d: P-State control(%d)/status(%d) registers have "
441 "different SpaceId", mycpuid,
442 ctrl->pr_gas.SpaceId, status->pr_gas.SpaceId);
443 return EINVAL;
444 }
445
446 switch (ctrl->pr_gas.SpaceId) {
447 case ACPI_ADR_SPACE_FIXED_HARDWARE:
448 if (ctrl->pr_res != NULL || status->pr_res != NULL) {
449 /* XXX should panic() */
450 kprintf("cpu%d: Allocated resource for fixed hardware "
451 "registers\n", mycpuid);
452 return EINVAL;
453 }
454 break;
455
456 case ACPI_ADR_SPACE_SYSTEM_IO:
457 if (ctrl->pr_res == NULL) {
458 kprintf("cpu%d: ioport allocation failed for control "
459 "register\n", mycpuid);
460 return ENXIO;
461 }
462 error = acpi_pst_md_gas_verify(&ctrl->pr_gas);
463 if (error) {
464 kprintf("cpu%d: Invalid control register GAS\n",
465 mycpuid);
466 return error;
467 }
468
469 if (status->pr_res == NULL) {
470 kprintf("cpu%d: ioport allocation failed for status "
471 "register\n", mycpuid);
472 return ENXIO;
473 }
474 error = acpi_pst_md_gas_verify(&status->pr_gas);
475 if (error) {
476 kprintf("cpu%d: Invalid status register GAS\n",
477 mycpuid);
478 return error;
479 }
480 break;
481
482 default:
483 kprintf("cpu%d: Invalid P-State control/status register "
484 "SpaceId %d\n", mycpuid, ctrl->pr_gas.SpaceId);
485 return EOPNOTSUPP;
486 }
487 return 0;
488}
489
/*
 * P-state table validation for Intel: nothing to check, any _PSS
 * control value is accepted as-is.
 */
static int
acpi_pst_intel_check_pstates(const struct acpi_pstate *pstates __unused,
    int npstates __unused)
{
	return 0;
}
496
/*
 * Per-CPU initialization for Intel: make sure Enhanced SpeedStep is
 * enabled in IA32_MISC_ENABLE where the bit is architecturally defined
 * (family 6, model >= 0xd).  On other CPUs the bit is reserved and is
 * left untouched.
 *
 * Returns 0 on success, EIO if EST could not be enabled.
 */
static int
acpi_pst_intel_init(const struct acpi_pst_res *ctrl __unused,
    const struct acpi_pst_res *status __unused)
{
	uint32_t family, model;
	uint64_t misc_enable;

	family = cpu_id & 0xf00;
	if (family == 0xf00) {
		/* EST enable bit is reserved in INTEL_MSR_MISC_ENABLE */
		return 0;
	}
	KKASSERT(family == 0x600);

	/* model = (extended model << 4) | model */
	model = ((cpu_id & 0xf0000) >> 12) | ((cpu_id & 0xf0) >> 4);
	if (model < 0xd) {
		/* EST enable bit is reserved in INTEL_MSR_MISC_ENABLE */
		return 0;
	}

	misc_enable = rdmsr(INTEL_MSR_MISC_ENABLE);
	if ((misc_enable & INTEL_MSR_MISC_EST_EN) == 0) {
		misc_enable |= INTEL_MSR_MISC_EST_EN;
		wrmsr(INTEL_MSR_MISC_ENABLE, misc_enable);

		/* Read back to confirm the write actually took effect */
		misc_enable = rdmsr(INTEL_MSR_MISC_ENABLE);
		if ((misc_enable & INTEL_MSR_MISC_EST_EN) == 0) {
			kprintf("cpu%d: Can't enable EST\n", mycpuid);
			return EIO;
		}
	}
	return 0;
}
530
531static int
532acpi_pst_intel_set_pstate(const struct acpi_pst_res *ctrl,
533 const struct acpi_pst_res *status __unused,
534 const struct acpi_pstate *pstate)
535{
536 if (ctrl->pr_res != NULL) {
537 acpi_pst_md_res_write(ctrl, pstate->st_cval);
538 } else {
539 uint64_t ctl;
540
541 ctl = rdmsr(INTEL_MSR_PERF_CTL);
542 ctl &= ~INTEL_MSR_PERF_MASK;
543 ctl |= (pstate->st_cval & INTEL_MSR_PERF_MASK);
544 wrmsr(INTEL_MSR_PERF_CTL, ctl);
545 }
546 return 0;
547}
548
549static const struct acpi_pstate *
550acpi_pst_intel_get_pstate(const struct acpi_pst_res *status,
551 const struct acpi_pstate *pstates, int npstates)
552{
553 int i;
554
555 if (status->pr_res != NULL) {
556 uint32_t st;
557
558 st = acpi_pst_md_res_read(status);
559 for (i = 0; i < npstates; ++i) {
560 if (pstates[i].st_sval == st)
561 return &pstates[i];
562 }
563 } else {
564 uint64_t sval;
565
566 sval = rdmsr(INTEL_MSR_PERF_STATUS) & INTEL_MSR_PERF_MASK;
567 for (i = 0; i < npstates; ++i) {
568 if ((pstates[i].st_sval & INTEL_MSR_PERF_MASK) == sval)
569 return &pstates[i];
570 }
571 }
572 return NULL;
573}
574
575static int
576acpi_pst_md_gas_asz(const ACPI_GENERIC_ADDRESS *gas)
577{
578 int asz;
579
580 if (gas->AccessWidth != 0)
581 asz = gas->AccessWidth;
582 else
583 asz = gas->BitWidth / NBBY;
584 switch (asz) {
585 case 1:
586 case 2:
587 case 4:
588 break;
589 default:
590 asz = 0;
591 break;
592 }
593 return asz;
594}
595
596static int
597acpi_pst_md_gas_verify(const ACPI_GENERIC_ADDRESS *gas)
598{
599 int reg, end, asz;
600
601 if (gas->BitOffset % NBBY != 0)
602 return EINVAL;
603
604 end = gas->BitWidth / NBBY;
605 reg = gas->BitOffset / NBBY;
606
607 if (reg >= end)
608 return EINVAL;
609
610 asz = acpi_pst_md_gas_asz(gas);
611 if (asz == 0)
612 return EINVAL;
613
614 if (reg + asz > end)
615 return EINVAL;
616 return 0;
617}
618
619static uint32_t
620acpi_pst_md_res_read(const struct acpi_pst_res *res)
621{
622 int asz, reg;
623
624 KKASSERT(res->pr_res != NULL);
625 asz = acpi_pst_md_gas_asz(&res->pr_gas);
626 reg = res->pr_gas.BitOffset / NBBY;
627
628 switch (asz) {
629 case 1:
630 return bus_space_read_1(res->pr_bt, res->pr_bh, reg);
631 case 2:
632 return bus_space_read_2(res->pr_bt, res->pr_bh, reg);
633 case 4:
634 return bus_space_read_4(res->pr_bt, res->pr_bh, reg);
635 }
636 panic("unsupported access width %d\n", asz);
637
638 /* NEVER REACHED */
639 return 0;
640}
641
642static void
643acpi_pst_md_res_write(const struct acpi_pst_res *res, uint32_t val)
644{
645 int asz, reg;
646
647 KKASSERT(res->pr_res != NULL);
648 asz = acpi_pst_md_gas_asz(&res->pr_gas);
649 reg = res->pr_gas.BitOffset / NBBY;
650
651 switch (asz) {
652 case 1:
653 bus_space_write_1(res->pr_bt, res->pr_bh, reg, val);
654 break;
655 case 2:
656 bus_space_write_2(res->pr_bt, res->pr_bh, reg, val);
657 break;
658 case 4:
659 bus_space_write_4(res->pr_bt, res->pr_bh, reg, val);
660 break;
661 default:
662 panic("unsupported access width %d\n", asz);
663 }
664}