sys/platform/pc64/acpica/acpi_pstate_machdep.c
/*
 * Copyright (c) 2009 The DragonFly Project. All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Sepherosa Ziehau <sepherosa@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/globaldata.h>

#include <machine/md_var.h>
#include <machine/cpufunc.h>
#include <machine/cpufreq.h>
#include <machine/cputypes.h>
#include <machine/specialreg.h>

#include "acpi.h"
#include "acpi_cpu_pstate.h"

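/*
 * AMD definitions.
 *
 * AMD_APMI_HWPSTATE is bit 7 (HwPstate) of the Advanced Power Management
 * feature flags returned in %edx by CPUID 0x80000007.
 *
 * On family >= 10h the current P-State is requested through the P-State
 * Control MSR (0xc0010062) and reported by the P-State Status MSR
 * (0xc0010063); only the low 3 bits carry the P-State number.  Family 10h
 * provides five P-State definition MSRs starting at 0xc0010064, each with
 * an enable bit in bit 63.
 */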
#define AMD_APMI_HWPSTATE		0x80

#define AMD_MSR_PSTATE_CSR_MASK		0x7ULL
#define AMD1X_MSR_PSTATE_CTL		0xc0010062
#define AMD1X_MSR_PSTATE_ST		0xc0010063

#define AMD_MSR_PSTATE_EN		0x8000000000000000ULL

#define AMD10_MSR_PSTATE_START		0xc0010064
#define AMD10_MSR_PSTATE_COUNT		5

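/*
 * Family 0fh: extract the FID/VID control interface fields from the ACPI
 * _PSS control value: frequency ID (FID), voltage ID (VID), voltage
 * stabilization time (VST), maximum voltage step (MVS), PLL lock time,
 * ramp voltage offset (RVO) and isochronous relief time (IRT).
 */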
#define AMD0F_PST_CTL_FID(cval)		(((cval) >> 0) & 0x3f)
#define AMD0F_PST_CTL_VID(cval)		(((cval) >> 6) & 0x1f)
#define AMD0F_PST_CTL_VST(cval)		(((cval) >> 11) & 0x7f)
#define AMD0F_PST_CTL_MVS(cval)		(((cval) >> 18) & 0x3)
#define AMD0F_PST_CTL_PLLTIME(cval)	(((cval) >> 20) & 0x7f)
#define AMD0F_PST_CTL_RVO(cval)		(((cval) >> 28) & 0x3)
#define AMD0F_PST_CTL_IRT(cval)		(((cval) >> 30) & 0x3)

#define AMD0F_PST_ST_FID(sval)		(((sval) >> 0) & 0x3f)
#define AMD0F_PST_ST_VID(sval)		(((sval) >> 6) & 0x3f)

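/*
 * Intel Enhanced SpeedStep (EST): bit 16 of IA32_MISC_ENABLE (0x1a0)
 * enables EST; the target and current performance states live in the
 * low 16 bits of IA32_PERF_CTL (0x199) and IA32_PERF_STATUS (0x198).
 */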
#define INTEL_MSR_MISC_ENABLE		0x1a0
#define INTEL_MSR_MISC_EST_EN		0x10000ULL

#define INTEL_MSR_PERF_STATUS		0x198
#define INTEL_MSR_PERF_CTL		0x199
#define INTEL_MSR_PERF_MASK		0xffffULL

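/*
 * Vendor-specific backends: one set of methods for AMD (family 0fh and
 * family >= 10h variants) and one for Intel EST.
 */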
static const struct acpi_pst_md *
		acpi_pst_amd_probe(void);
static int	acpi_pst_amd_check_csr(const struct acpi_pst_res *,
		    const struct acpi_pst_res *);
static int	acpi_pst_amd1x_check_pstates(const struct acpi_pstate *, int,
		    uint32_t, uint32_t);
static int	acpi_pst_amd10_check_pstates(const struct acpi_pstate *, int);
static int	acpi_pst_amd0f_check_pstates(const struct acpi_pstate *, int);
static int	acpi_pst_amd_init(const struct acpi_pst_res *,
		    const struct acpi_pst_res *);
static int	acpi_pst_amd1x_set_pstate(const struct acpi_pst_res *,
		    const struct acpi_pst_res *, const struct acpi_pstate *);
static int	acpi_pst_amd0f_set_pstate(const struct acpi_pst_res *,
		    const struct acpi_pst_res *, const struct acpi_pstate *);
static const struct acpi_pstate *
		acpi_pst_amd1x_get_pstate(const struct acpi_pst_res *,
		    const struct acpi_pstate *, int);
static const struct acpi_pstate *
		acpi_pst_amd0f_get_pstate(const struct acpi_pst_res *,
		    const struct acpi_pstate *, int);

static const struct acpi_pst_md *
		acpi_pst_intel_probe(void);
static int	acpi_pst_intel_check_csr(const struct acpi_pst_res *,
		    const struct acpi_pst_res *);
static int	acpi_pst_intel_check_pstates(const struct acpi_pstate *, int);
static int	acpi_pst_intel_init(const struct acpi_pst_res *,
		    const struct acpi_pst_res *);
static int	acpi_pst_intel_set_pstate(const struct acpi_pst_res *,
		    const struct acpi_pst_res *, const struct acpi_pstate *);
static const struct acpi_pstate *
		acpi_pst_intel_get_pstate(const struct acpi_pst_res *,
		    const struct acpi_pstate *, int);

static int	acpi_pst_md_gas_asz(const ACPI_GENERIC_ADDRESS *);
static int	acpi_pst_md_gas_verify(const ACPI_GENERIC_ADDRESS *);
static uint32_t	acpi_pst_md_res_read(const struct acpi_pst_res *);
static void	acpi_pst_md_res_write(const struct acpi_pst_res *, uint32_t);

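/*
 * Method tables handed back by acpi_pst_md_probe(); the generic ACPI
 * P-State code drives the hardware through these pmd_* hooks.
 */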
static const struct acpi_pst_md acpi_pst_amd10 = {
	.pmd_check_csr = acpi_pst_amd_check_csr,
	.pmd_check_pstates = acpi_pst_amd10_check_pstates,
	.pmd_init = acpi_pst_amd_init,
	.pmd_set_pstate = acpi_pst_amd1x_set_pstate,
	.pmd_get_pstate = acpi_pst_amd1x_get_pstate
};

static const struct acpi_pst_md acpi_pst_amd0f = {
	.pmd_check_csr = acpi_pst_amd_check_csr,
	.pmd_check_pstates = acpi_pst_amd0f_check_pstates,
	.pmd_init = acpi_pst_amd_init,
	.pmd_set_pstate = acpi_pst_amd0f_set_pstate,
	.pmd_get_pstate = acpi_pst_amd0f_get_pstate
};

static const struct acpi_pst_md acpi_pst_intel = {
	.pmd_check_csr = acpi_pst_intel_check_csr,
	.pmd_check_pstates = acpi_pst_intel_check_pstates,
	.pmd_init = acpi_pst_intel_init,
	.pmd_set_pstate = acpi_pst_intel_set_pstate,
	.pmd_get_pstate = acpi_pst_intel_get_pstate
};

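/*
 * When set (the default), reject P-State tables that clearly disagree
 * with the hardware FID/VID limits.  The check can be relaxed from
 * loader.conf(5), e.g.:
 *	hw.acpi.cpu.pstate.strigent_check="0"
 */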
static int acpi_pst_stringent_check = 1;
TUNABLE_INT("hw.acpi.cpu.pstate.strigent_check", &acpi_pst_stringent_check);

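/*
 * Select the machine-dependent backend that matches the running CPU,
 * or return NULL if P-State control is not supported on it.
 */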
const struct acpi_pst_md *
acpi_pst_md_probe(void)
{
	if (cpu_vendor_id == CPU_VENDOR_AMD)
		return acpi_pst_amd_probe();
	else if (cpu_vendor_id == CPU_VENDOR_INTEL)
		return acpi_pst_intel_probe();
	return NULL;
}

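/*
 * AMD: P-State support starts with family 0fh.  CPUID 0x80000007 %edx
 * must advertise FID and VID control (bits 1 and 2) on family 0fh, or
 * hardware P-State control (bit 7) on family >= 10h.
 */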
static const struct acpi_pst_md *
acpi_pst_amd_probe(void)
{
	uint32_t regs[4];

	/* Only Family >= 0fh has P-State support */
	if (CPUID_TO_FAMILY(cpu_id) < 0xf)
		return NULL;

	/* Check whether APMI exists */
	if (cpu_exthigh < 0x80000007)
		return NULL;

	/* Fetch APMI */
	do_cpuid(0x80000007, regs);

	if (CPUID_TO_FAMILY(cpu_id) == 0xf) {		/* Family 0fh */
		if ((regs[3] & 0x06) == 0x06)
			return &acpi_pst_amd0f;
	} else if (CPUID_TO_FAMILY(cpu_id) >= 0x10) {	/* Family >= 10h */
		if (regs[3] & 0x80)
			return &acpi_pst_amd10;
	}
	return NULL;
}

static int
acpi_pst_amd_check_csr(const struct acpi_pst_res *ctrl,
    const struct acpi_pst_res *status)
{
	if (ctrl->pr_gas.SpaceId != ACPI_ADR_SPACE_FIXED_HARDWARE) {
		kprintf("cpu%d: Invalid P-State control register\n", mycpuid);
		return EINVAL;
	}
	if (status->pr_gas.SpaceId != ACPI_ADR_SPACE_FIXED_HARDWARE) {
		kprintf("cpu%d: Invalid P-State status register\n", mycpuid);
		return EINVAL;
	}
	return 0;
}

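/*
 * Verify that every ACPI P-State maps to an existing and enabled
 * P-State definition MSR in [msr_start, msr_end).
 */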
static int
acpi_pst_amd1x_check_pstates(const struct acpi_pstate *pstates, int npstates,
    uint32_t msr_start, uint32_t msr_end)
{
	int i;

	/*
	 * Make sure that related MSR P-State registers are enabled.
	 *
	 * NOTE:
	 * We don't check status register value here;
	 * it will not be used.
	 */
	for (i = 0; i < npstates; ++i) {
		uint64_t pstate;
		uint32_t msr;

		msr = msr_start +
		    (pstates[i].st_cval & AMD_MSR_PSTATE_CSR_MASK);
		if (msr >= msr_end) {
			kprintf("cpu%d: MSR P-State register %#08x "
				"does not exist\n", mycpuid, msr);
			return EINVAL;
		}

		pstate = rdmsr(msr);
		if ((pstate & AMD_MSR_PSTATE_EN) == 0) {
			kprintf("cpu%d: MSR P-State register %#08x "
				"is not enabled\n", mycpuid, msr);
			return EINVAL;
		}
	}
	return 0;
}

static int
acpi_pst_amd10_check_pstates(const struct acpi_pstate *pstates, int npstates)
{
	/* Only P0-P4 are supported */
	if (npstates > AMD10_MSR_PSTATE_COUNT) {
		kprintf("cpu%d: only P0-P4 are allowed\n", mycpuid);
		return EINVAL;
	}

	return acpi_pst_amd1x_check_pstates(pstates, npstates,
	    AMD10_MSR_PSTATE_START,
	    AMD10_MSR_PSTATE_START + AMD10_MSR_PSTATE_COUNT);
}

static int
acpi_pst_amd1x_set_pstate(const struct acpi_pst_res *ctrl __unused,
    const struct acpi_pst_res *status __unused,
    const struct acpi_pstate *pstate)
{
	uint64_t cval;

	cval = pstate->st_cval & AMD_MSR_PSTATE_CSR_MASK;
	wrmsr(AMD1X_MSR_PSTATE_CTL, cval);

	/*
	 * Don't check AMD1X_MSR_PSTATE_ST here, since it is
	 * affected by various P-State limits.
	 *
	 * For details:
	 * AMD Family 10h Processor BKDG Rev 3.20 (#31116)
	 * 2.4.2.4 P-state Transition Behavior
	 */

	return 0;
}

static const struct acpi_pstate *
acpi_pst_amd1x_get_pstate(const struct acpi_pst_res *status __unused,
    const struct acpi_pstate *pstates, int npstates)
{
	uint64_t sval;
	int i;

	sval = rdmsr(AMD1X_MSR_PSTATE_ST) & AMD_MSR_PSTATE_CSR_MASK;
	for (i = 0; i < npstates; ++i) {
		if ((pstates[i].st_sval & AMD_MSR_PSTATE_CSR_MASK) == sval)
			return &pstates[i];
	}
	return NULL;
}

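/*
 * Family 0fh: sanity check the ACPI P-States against the FID/VID limits
 * reported by the hardware.  Mismatches are logged; clearly bogus tables
 * are rejected when the stringent check tunable is enabled (the default).
 */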
static int
acpi_pst_amd0f_check_pstates(const struct acpi_pstate *pstates, int npstates)
{
	struct amd0f_fidvid fv_max, fv_min;
	int i;

	amd0f_fidvid_limit(&fv_min, &fv_max);

	if (fv_min.fid == fv_max.fid && fv_min.vid == fv_max.vid) {
		kprintf("cpu%d: only one P-State is supported\n", mycpuid);
		if (acpi_pst_stringent_check)
			return EOPNOTSUPP;
	}

	for (i = 0; i < npstates; ++i) {
		const struct acpi_pstate *p = &pstates[i];
		uint32_t fid, vid, mvs, rvo;
		int mvs_mv, rvo_mv;

		fid = AMD0F_PST_CTL_FID(p->st_cval);
		vid = AMD0F_PST_CTL_VID(p->st_cval);

		if (i == 0) {
			if (vid != fv_max.vid) {
				kprintf("cpu%d: max VID mismatch "
					"real %u, lim %d\n", mycpuid,
					vid, fv_max.vid);
			}
			if (fid != fv_max.fid) {
				kprintf("cpu%d: max FID mismatch "
					"real %u, lim %d\n", mycpuid,
					fid, fv_max.fid);
			}
		} else if (i == npstates - 1) {
			if (vid != fv_min.vid) {
				kprintf("cpu%d: min VID mismatch "
					"real %u, lim %d\n", mycpuid,
					vid, fv_min.vid);
			}
			if (fid != fv_min.fid) {
				kprintf("cpu%d: min FID mismatch "
					"real %u, lim %d\n", mycpuid,
					fid, fv_min.fid);
			}
		} else {
			if (fid >= fv_max.fid || fid < (fv_min.fid + 0x8)) {
				kprintf("cpu%d: Invalid FID %#x, "
					"out [%#x, %#x]\n", mycpuid, fid,
					fv_min.fid + 0x8, fv_max.fid);
				if (acpi_pst_stringent_check)
					return EINVAL;
			}
			if (vid < fv_max.vid || vid > fv_min.vid) {
				kprintf("cpu%d: Invalid VID %#x, "
					"out [%#x, %#x]\n", mycpuid, vid,
					fv_max.vid, fv_min.vid);
				if (acpi_pst_stringent_check)
					return EINVAL;
			}
		}

		mvs = AMD0F_PST_CTL_MVS(p->st_cval);
		rvo = AMD0F_PST_CTL_RVO(p->st_cval);

		/* Only 0 is allowed, i.e. 25mV stepping */
		if (mvs != 0) {
			kprintf("cpu%d: Invalid MVS %#x\n", mycpuid, mvs);
			return EINVAL;
		}

		/* -> mV */
		mvs_mv = 25 * (1 << mvs);
		rvo_mv = 25 * rvo;
		if (rvo_mv % mvs_mv != 0) {
			kprintf("cpu%d: Invalid MVS/RVO (%#x/%#x)\n",
				mycpuid, mvs, rvo);
			return EINVAL;
		}
	}
	return 0;
}

static int
acpi_pst_amd0f_set_pstate(const struct acpi_pst_res *ctrl __unused,
    const struct acpi_pst_res *status __unused,
    const struct acpi_pstate *pstate)
{
	struct amd0f_fidvid fv;
	struct amd0f_xsit xsit;

	fv.fid = AMD0F_PST_CTL_FID(pstate->st_cval);
	fv.vid = AMD0F_PST_CTL_VID(pstate->st_cval);

	xsit.rvo = AMD0F_PST_CTL_RVO(pstate->st_cval);
	xsit.mvs = AMD0F_PST_CTL_MVS(pstate->st_cval);
	xsit.vst = AMD0F_PST_CTL_VST(pstate->st_cval);
	xsit.pll_time = AMD0F_PST_CTL_PLLTIME(pstate->st_cval);
	xsit.irt = AMD0F_PST_CTL_IRT(pstate->st_cval);

	return amd0f_set_fidvid(&fv, &xsit);
}

static const struct acpi_pstate *
acpi_pst_amd0f_get_pstate(const struct acpi_pst_res *status __unused,
    const struct acpi_pstate *pstates, int npstates)
{
	struct amd0f_fidvid fv;
	int error, i;

	error = amd0f_get_fidvid(&fv);
	if (error)
		return NULL;

	for (i = 0; i < npstates; ++i) {
		const struct acpi_pstate *p = &pstates[i];

		if (fv.fid == AMD0F_PST_ST_FID(p->st_sval) &&
		    fv.vid == AMD0F_PST_ST_VID(p->st_sval))
			return p;
	}
	return NULL;
}

static int
acpi_pst_amd_init(const struct acpi_pst_res *ctrl __unused,
    const struct acpi_pst_res *status __unused)
{
	return 0;
}

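/*
 * Intel: require the EST CPUID feature and a family 6 or family 15 CPU.
 */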
static const struct acpi_pst_md *
acpi_pst_intel_probe(void)
{
	uint32_t family;

	if ((cpu_feature2 & CPUID2_EST) == 0)
		return NULL;

	family = cpu_id & 0xf00;
	if (family != 0xf00 && family != 0x600)
		return NULL;
	return &acpi_pst_intel;
}

static int
acpi_pst_intel_check_csr(const struct acpi_pst_res *ctrl,
    const struct acpi_pst_res *status)
{
	int error;

	if (ctrl->pr_gas.SpaceId != status->pr_gas.SpaceId) {
		kprintf("cpu%d: P-State control(%d)/status(%d) registers have "
			"different SpaceId\n", mycpuid,
			ctrl->pr_gas.SpaceId, status->pr_gas.SpaceId);
		return EINVAL;
	}

	switch (ctrl->pr_gas.SpaceId) {
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		if (ctrl->pr_res != NULL || status->pr_res != NULL) {
			/* XXX should panic() */
			kprintf("cpu%d: Allocated resource for fixed hardware "
				"registers\n", mycpuid);
			return EINVAL;
		}
		break;

	case ACPI_ADR_SPACE_SYSTEM_IO:
		if (ctrl->pr_res == NULL) {
			kprintf("cpu%d: ioport allocation failed for control "
				"register\n", mycpuid);
			return ENXIO;
		}
		error = acpi_pst_md_gas_verify(&ctrl->pr_gas);
		if (error) {
			kprintf("cpu%d: Invalid control register GAS\n",
				mycpuid);
			return error;
		}

		if (status->pr_res == NULL) {
			kprintf("cpu%d: ioport allocation failed for status "
				"register\n", mycpuid);
			return ENXIO;
		}
		error = acpi_pst_md_gas_verify(&status->pr_gas);
		if (error) {
			kprintf("cpu%d: Invalid status register GAS\n",
				mycpuid);
			return error;
		}
		break;

	default:
		kprintf("cpu%d: Invalid P-State control/status register "
			"SpaceId %d\n", mycpuid, ctrl->pr_gas.SpaceId);
		return EOPNOTSUPP;
	}
	return 0;
}

static int
acpi_pst_intel_check_pstates(const struct acpi_pstate *pstates __unused,
    int npstates __unused)
{
	return 0;
}

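/*
 * Enable EST in IA32_MISC_ENABLE if necessary.  The enable bit is only
 * touched on family 6 models >= 0dh; on earlier models and on family 0fh
 * it is treated as reserved and the MSR is left alone.
 */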
static int
acpi_pst_intel_init(const struct acpi_pst_res *ctrl __unused,
    const struct acpi_pst_res *status __unused)
{
	uint32_t family, model;
	uint64_t misc_enable;

	family = cpu_id & 0xf00;
	if (family == 0xf00) {
		/* EST enable bit is reserved in INTEL_MSR_MISC_ENABLE */
		return 0;
	}
	KKASSERT(family == 0x600);

	model = ((cpu_id & 0xf0000) >> 12) | ((cpu_id & 0xf0) >> 4);
	if (model < 0xd) {
		/* EST enable bit is reserved in INTEL_MSR_MISC_ENABLE */
		return 0;
	}

	misc_enable = rdmsr(INTEL_MSR_MISC_ENABLE);
	if ((misc_enable & INTEL_MSR_MISC_EST_EN) == 0) {
		misc_enable |= INTEL_MSR_MISC_EST_EN;
		wrmsr(INTEL_MSR_MISC_ENABLE, misc_enable);

		misc_enable = rdmsr(INTEL_MSR_MISC_ENABLE);
		if ((misc_enable & INTEL_MSR_MISC_EST_EN) == 0) {
			kprintf("cpu%d: Can't enable EST\n", mycpuid);
			return EIO;
		}
	}
	return 0;
}

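/*
 * Write the ACPI control value either to the I/O-mapped control register
 * or, for fixed-hardware P-States, into the low 16 bits of IA32_PERF_CTL.
 */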
static int
acpi_pst_intel_set_pstate(const struct acpi_pst_res *ctrl,
    const struct acpi_pst_res *status __unused,
    const struct acpi_pstate *pstate)
{
	if (ctrl->pr_res != NULL) {
		acpi_pst_md_res_write(ctrl, pstate->st_cval);
	} else {
		uint64_t ctl;

		ctl = rdmsr(INTEL_MSR_PERF_CTL);
		ctl &= ~INTEL_MSR_PERF_MASK;
		ctl |= (pstate->st_cval & INTEL_MSR_PERF_MASK);
		wrmsr(INTEL_MSR_PERF_CTL, ctl);
	}
	return 0;
}

static const struct acpi_pstate *
acpi_pst_intel_get_pstate(const struct acpi_pst_res *status,
    const struct acpi_pstate *pstates, int npstates)
{
	int i;

	if (status->pr_res != NULL) {
		uint32_t st;

		st = acpi_pst_md_res_read(status);
		for (i = 0; i < npstates; ++i) {
			if (pstates[i].st_sval == st)
				return &pstates[i];
		}
	} else {
		uint64_t sval;

		sval = rdmsr(INTEL_MSR_PERF_STATUS) & INTEL_MSR_PERF_MASK;
		for (i = 0; i < npstates; ++i) {
			if ((pstates[i].st_sval & INTEL_MSR_PERF_MASK) == sval)
				return &pstates[i];
		}
	}
	return NULL;
}

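/*
 * Derive the access size in bytes for a Generic Address Structure:
 * use AccessWidth when it is non-zero, otherwise fall back to
 * BitWidth / NBBY.  Only 1, 2 and 4 byte accesses are supported;
 * anything else yields 0.
 */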
static int
acpi_pst_md_gas_asz(const ACPI_GENERIC_ADDRESS *gas)
{
	int asz;

	if (gas->AccessWidth != 0)
		asz = gas->AccessWidth;
	else
		asz = gas->BitWidth / NBBY;
	switch (asz) {
	case 1:
	case 2:
	case 4:
		break;
	default:
		asz = 0;
		break;
	}
	return asz;
}

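/*
 * Check that the GAS is byte aligned and that the register window implied
 * by its BitOffset and access size fits within its BitWidth.
 */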
static int
acpi_pst_md_gas_verify(const ACPI_GENERIC_ADDRESS *gas)
{
	int reg, end, asz;

	if (gas->BitOffset % NBBY != 0)
		return EINVAL;

	end = gas->BitWidth / NBBY;
	reg = gas->BitOffset / NBBY;

	if (reg >= end)
		return EINVAL;

	asz = acpi_pst_md_gas_asz(gas);
	if (asz == 0)
		return EINVAL;

	if (reg + asz > end)
		return EINVAL;
	return 0;
}

static uint32_t
acpi_pst_md_res_read(const struct acpi_pst_res *res)
{
	int asz, reg;

	KKASSERT(res->pr_res != NULL);
	asz = acpi_pst_md_gas_asz(&res->pr_gas);
	reg = res->pr_gas.BitOffset / NBBY;

	switch (asz) {
	case 1:
		return bus_space_read_1(res->pr_bt, res->pr_bh, reg);
	case 2:
		return bus_space_read_2(res->pr_bt, res->pr_bh, reg);
	case 4:
		return bus_space_read_4(res->pr_bt, res->pr_bh, reg);
	}
	panic("unsupported access width %d", asz);

	/* NEVER REACHED */
	return 0;
}

static void
acpi_pst_md_res_write(const struct acpi_pst_res *res, uint32_t val)
{
	int asz, reg;

	KKASSERT(res->pr_res != NULL);
	asz = acpi_pst_md_gas_asz(&res->pr_gas);
	reg = res->pr_gas.BitOffset / NBBY;

	switch (asz) {
	case 1:
		bus_space_write_1(res->pr_bt, res->pr_bh, reg, val);
		break;
	case 2:
		bus_space_write_2(res->pr_bt, res->pr_bh, reg, val);
		break;
	case 4:
		bus_space_write_4(res->pr_bt, res->pr_bh, reg, val);
		break;
	default:
		panic("unsupported access width %d", asz);
	}
}