/*
 * Copyright (c) 1999 Michael Smith <msmith@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/i686_mem.c,v 1.8.2.4 2002/09/24 08:12:51 mdodd Exp $
 * $DragonFly: src/sys/platform/pc32/i386/i686_mem.c,v 1.2 2003/06/17 04:28:35 dillon Exp $
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/memrange.h>

#include <machine/md_var.h>
#include <machine/specialreg.h>

#ifdef SMP
#include <machine/smp.h>
#endif

/*
 * i686 memory range operations
 *
 * This code will probably be impenetrable without reference to the
 * Intel Pentium Pro documentation.
 */

static char *mem_owner_bios = "BIOS";

#define MR686_FIXMTRR	(1<<0)

#define mrwithin(mr, a) \
    (((a) >= (mr)->mr_base) && ((a) < ((mr)->mr_base + (mr)->mr_len)))
#define mroverlap(mra, mrb) \
    (mrwithin(mra, mrb->mr_base) || mrwithin(mrb, mra->mr_base))
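
/*
 * Example (illustrative): a range at 0xa0000 of length 0x20000 overlaps
 * one at 0xb0000 of length 0x20000, since the second base falls inside
 * the first range; adjacent ranges such as [0xa0000, 0xb0000) and
 * [0xb0000, 0xc0000) do not overlap, the upper bound being exclusive.
 */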
#define mrvalid(base, len) \
    ((!(base & ((1 << 12) - 1))) &&	/* base is multiple of 4k */	\
     ((len) >= (1 << 12)) &&		/* length is >= 4k */		\
     powerof2((len)) &&			/* ... and power of two */	\
     !((base) & ((len) - 1)))		/* range is not discontinuous */
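
/*
 * Example (illustrative): base 0xd0000000 with len 0x8000000 (128MB)
 * passes all four tests; len 0x18000000 would fail powerof2(), and
 * base 0xd8000000 with len 0x10000000 would fail the alignment test.
 */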

#define mrcopyflags(curr, new) (((curr) & ~MDF_ATTRMASK) | ((new) & MDF_ATTRMASK))

static void			i686_mrinit(struct mem_range_softc *sc);
static int			i686_mrset(struct mem_range_softc *sc,
					   struct mem_range_desc *mrd,
					   int *arg);
static void			i686_mrAPinit(struct mem_range_softc *sc);

static struct mem_range_ops i686_mrops = {
    i686_mrinit,
    i686_mrset,
    i686_mrAPinit
};

/* XXX for AP startup hook */
static u_int64_t		mtrrcap, mtrrdef;

static struct mem_range_desc	*mem_range_match(struct mem_range_softc *sc,
						 struct mem_range_desc *mrd);
static void			i686_mrfetch(struct mem_range_softc *sc);
static int			i686_mtrrtype(int flags);
static int			i686_mrt2mtrr(int flags, int oldval);
static int			i686_mtrrconflict(int flag1, int flag2);
static void			i686_mrstore(struct mem_range_softc *sc);
static void			i686_mrstoreone(void *arg);
static struct mem_range_desc	*i686_mtrrfixsearch(struct mem_range_softc *sc,
						    u_int64_t addr);
static int			i686_mrsetlow(struct mem_range_softc *sc,
					      struct mem_range_desc *mrd,
					      int *arg);
static int			i686_mrsetvariable(struct mem_range_softc *sc,
						   struct mem_range_desc *mrd,
						   int *arg);

/* i686 MTRR type to memory range type conversion */
static int i686_mtrrtomrt[] = {
    MDF_UNCACHEABLE,
    MDF_WRITECOMBINE,
    MDF_UNKNOWN,
    MDF_UNKNOWN,
    MDF_WRITETHROUGH,
    MDF_WRITEPROTECT,
    MDF_WRITEBACK
};

#define MTRRTOMRTLEN (sizeof(i686_mtrrtomrt) / sizeof(i686_mtrrtomrt[0]))
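
/*
 * The table above is indexed by the Intel MTRR type encoding:
 * 0 = uncacheable (UC), 1 = write-combining (WC), 2 and 3 = reserved,
 * 4 = write-through (WT), 5 = write-protected (WP), 6 = write-back (WB).
 */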
static int
i686_mtrr2mrt(int val) {
	if (val < 0 || val >= MTRRTOMRTLEN)
		return MDF_UNKNOWN;
	return i686_mtrrtomrt[val];
}

/*
 * i686 MTRR conflicts. Writeback and uncacheable may overlap.
 */
static int
i686_mtrrconflict(int flag1, int flag2) {
	flag1 &= MDF_ATTRMASK;
	flag2 &= MDF_ATTRMASK;
	if (flag1 == flag2 ||
	    (flag1 == MDF_WRITEBACK && flag2 == MDF_UNCACHEABLE) ||
	    (flag2 == MDF_WRITEBACK && flag1 == MDF_UNCACHEABLE))
		return 0;
	return 1;
}

/*
 * Look for an exactly-matching range.
 */
static struct mem_range_desc *
mem_range_match(struct mem_range_softc *sc, struct mem_range_desc *mrd)
{
    struct mem_range_desc	*cand;
    int				i;

    for (i = 0, cand = sc->mr_desc; i < sc->mr_ndesc; i++, cand++)
	if ((cand->mr_base == mrd->mr_base) &&
	    (cand->mr_len == mrd->mr_len))
	    return(cand);
    return(NULL);
}

/*
 * Fetch the current mtrr settings from the current CPU (assumed to all
 * be in sync in the SMP case). Note that if we are here, we assume
 * that MTRRs are enabled, and we may or may not have fixed MTRRs.
 */
static void
i686_mrfetch(struct mem_range_softc *sc)
{
    struct mem_range_desc	*mrd;
    u_int64_t			msrv;
    int				i, j, msr;

    mrd = sc->mr_desc;

    /* Get fixed-range MTRRs */
    if (sc->mr_cap & MR686_FIXMTRR) {
	msr = MSR_MTRR64kBase;
	for (i = 0; i < (MTRR_N64K / 8); i++, msr++) {
	    msrv = rdmsr(msr);
	    for (j = 0; j < 8; j++, mrd++) {
		mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
		    i686_mtrr2mrt(msrv & 0xff) |
		    MDF_ACTIVE;
		if (mrd->mr_owner[0] == 0)
		    strcpy(mrd->mr_owner, mem_owner_bios);
		msrv = msrv >> 8;
	    }
	}
	msr = MSR_MTRR16kBase;
	for (i = 0; i < (MTRR_N16K / 8); i++, msr++) {
	    msrv = rdmsr(msr);
	    for (j = 0; j < 8; j++, mrd++) {
		mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
		    i686_mtrr2mrt(msrv & 0xff) |
		    MDF_ACTIVE;
		if (mrd->mr_owner[0] == 0)
		    strcpy(mrd->mr_owner, mem_owner_bios);
		msrv = msrv >> 8;
	    }
	}
	msr = MSR_MTRR4kBase;
	for (i = 0; i < (MTRR_N4K / 8); i++, msr++) {
	    msrv = rdmsr(msr);
	    for (j = 0; j < 8; j++, mrd++) {
		mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
		    i686_mtrr2mrt(msrv & 0xff) |
		    MDF_ACTIVE;
		if (mrd->mr_owner[0] == 0)
		    strcpy(mrd->mr_owner, mem_owner_bios);
		msrv = msrv >> 8;
	    }
	}
    }

    /* Get remainder which must be variable MTRRs */
    msr = MSR_MTRRVarBase;
    for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) {
	msrv = rdmsr(msr);
	mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
	    i686_mtrr2mrt(msrv & 0xff);
	mrd->mr_base = msrv & 0x0000000ffffff000LL;
	msrv = rdmsr(msr + 1);
	mrd->mr_flags = (msrv & 0x800) ?
	    (mrd->mr_flags | MDF_ACTIVE) :
	    (mrd->mr_flags & ~MDF_ACTIVE);
	/* Compute the range from the mask. Ick. */
	mrd->mr_len = (~(msrv & 0x0000000ffffff000LL) & 0x0000000fffffffffLL) + 1;
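	/*
	 * Worked example (not from the original source): for a 16MB
	 * range the mask MSR holds ~(0x1000000 - 1) in bits 35:12,
	 * i.e. 0xfff000000; inverting within the 36-bit physical
	 * address space gives 0xffffff, and adding one recovers
	 * mr_len = 0x1000000.
	 */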
	if (!mrvalid(mrd->mr_base, mrd->mr_len))
	    mrd->mr_flags |= MDF_BOGUS;
	/* If unclaimed and active, must be the BIOS */
	if ((mrd->mr_flags & MDF_ACTIVE) && (mrd->mr_owner[0] == 0))
	    strcpy(mrd->mr_owner, mem_owner_bios);
    }
}

/*
 * Return the MTRR memory type matching a region's flags
 */
static int
i686_mtrrtype(int flags)
{
    int		i;

    flags &= MDF_ATTRMASK;

    for (i = 0; i < MTRRTOMRTLEN; i++) {
	if (i686_mtrrtomrt[i] == MDF_UNKNOWN)
	    continue;
	if (flags == i686_mtrrtomrt[i])
	    return(i);
    }
    return(-1);
}

static int
i686_mrt2mtrr(int flags, int oldval)
{
    int		val;

    if ((val = i686_mtrrtype(flags)) == -1)
	return oldval & 0xff;
    return val & 0xff;
}

/*
 * Update running CPU(s) MTRRs to match the ranges in the descriptor
 * list.
 *
 * XXX Must be called with interrupts enabled.
 */
static void
i686_mrstore(struct mem_range_softc *sc)
{
#ifdef SMP
    /*
     * We should use all_but_self_ipi() to call other CPUs into a
     * locking gate, then call a target function to do this work.
     * The "proper" solution involves a generalised locking gate
     * implementation, not ready yet.
     */
    smp_rendezvous(NULL, i686_mrstoreone, NULL, (void *)sc);
#else
    disable_intr();				/* disable interrupts */
    i686_mrstoreone((void *)sc);
    enable_intr();
#endif
}

/*
 * Update the current CPU's MTRRs with those represented in the
 * descriptor list. Note that we do this wholesale rather than
 * just stuffing one entry; this is simpler (but slower, of course).
 */
static void
i686_mrstoreone(void *arg)
{
    struct mem_range_softc	*sc = (struct mem_range_softc *)arg;
    struct mem_range_desc	*mrd;
    u_int64_t			omsrv, msrv;
    int				i, j, msr;
    u_int			cr4save;

    mrd = sc->mr_desc;

    cr4save = rcr4();				/* save cr4 */
    if (cr4save & CR4_PGE)
	load_cr4(cr4save & ~CR4_PGE);
    load_cr0((rcr0() & ~CR0_NW) | CR0_CD);	/* disable caches (CD = 1, NW = 0) */
    wbinvd();					/* flush caches, TLBs */
    wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) & ~0x800);	/* disable MTRRs (E = 0) */
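
    /*
     * The steps above (clear PGE, set CD, flush with wbinvd, clear the
     * MTRR enable bit) follow Intel's documented procedure for changing
     * MTRRs; the mirror-image steps at the end of this function
     * re-enable everything once the new values are in place.
     */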
    /* Set fixed-range MTRRs */
    if (sc->mr_cap & MR686_FIXMTRR) {
	msr = MSR_MTRR64kBase;
	for (i = 0; i < (MTRR_N64K / 8); i++, msr++) {
	    omsrv = rdmsr(msr);
	    msrv = 0;
	    for (j = 7; j >= 0; j--) {
		msrv = msrv << 8;
		msrv |= i686_mrt2mtrr((mrd + j)->mr_flags, omsrv >> (j*8));
	    }
	    wrmsr(msr, msrv);
	    mrd += 8;
	}
	msr = MSR_MTRR16kBase;
	for (i = 0; i < (MTRR_N16K / 8); i++, msr++) {
	    omsrv = rdmsr(msr);
	    msrv = 0;
	    for (j = 7; j >= 0; j--) {
		msrv = msrv << 8;
		msrv |= i686_mrt2mtrr((mrd + j)->mr_flags, omsrv >> (j*8));
	    }
	    wrmsr(msr, msrv);
	    mrd += 8;
	}
	msr = MSR_MTRR4kBase;
	for (i = 0; i < (MTRR_N4K / 8); i++, msr++) {
	    omsrv = rdmsr(msr);
	    msrv = 0;
	    for (j = 7; j >= 0; j--) {
		msrv = msrv << 8;
		msrv |= i686_mrt2mtrr((mrd + j)->mr_flags, omsrv >> (j*8));
	    }
	    wrmsr(msr, msrv);
	    mrd += 8;
	}
    }

    /* Set remainder which must be variable MTRRs */
    msr = MSR_MTRRVarBase;
    for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) {
	/* base/type register */
	omsrv = rdmsr(msr);
	if (mrd->mr_flags & MDF_ACTIVE) {
	    msrv = mrd->mr_base & 0x0000000ffffff000LL;
	    msrv |= i686_mrt2mtrr(mrd->mr_flags, omsrv);
	} else {
	    msrv = 0;
	}
	wrmsr(msr, msrv);

	/* mask/active register */
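	/*
	 * Worked example (illustrative): a 16MB range yields
	 * ~(0x1000000 - 1) & 0xffffff000 = 0xfff000000, so the mask
	 * register is written as 0xfff000800 once the valid bit (0x800)
	 * is OR'd in; this is the inverse of the length computation in
	 * i686_mrfetch() above.
	 */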
	if (mrd->mr_flags & MDF_ACTIVE) {
	    msrv = 0x800 | (~(mrd->mr_len - 1) & 0x0000000ffffff000LL);
	} else {
	    msrv = 0;
	}
	wrmsr(msr + 1, msrv);
    }
    wbinvd();					/* flush caches, TLBs */
    wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) | 0x800);	/* restore MTRR state */
    load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* enable caches CD = 0 and NW = 0 */
    load_cr4(cr4save);				/* restore cr4 */
}

/*
 * Hunt for the fixed MTRR referencing (addr)
 */
static struct mem_range_desc *
i686_mtrrfixsearch(struct mem_range_softc *sc, u_int64_t addr)
{
    struct mem_range_desc	*mrd;
    int				i;

    for (i = 0, mrd = sc->mr_desc; i < (MTRR_N64K + MTRR_N16K + MTRR_N4K); i++, mrd++)
	if ((addr >= mrd->mr_base) && (addr < (mrd->mr_base + mrd->mr_len)))
	    return(mrd);
    return(NULL);
}

/*
 * Try to satisfy the given range request by manipulating the fixed MTRRs that
 * cover low memory.
 *
 * Note that we try to be generous here; we'll bloat the range out to the
 * next higher/lower boundary to avoid the consumer having to know too much
 * about the mechanisms here.
 *
 * XXX note that this will have to be updated when we start supporting "busy" ranges.
 */
static int
i686_mrsetlow(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
{
    struct mem_range_desc	*first_md, *last_md, *curr_md;

    /* range check */
    if (((first_md = i686_mtrrfixsearch(sc, mrd->mr_base)) == NULL) ||
	((last_md = i686_mtrrfixsearch(sc, mrd->mr_base + mrd->mr_len - 1)) == NULL))
	return(EINVAL);

    /* check we aren't doing something risky */
    if (!(mrd->mr_flags & MDF_FORCE))
	for (curr_md = first_md; curr_md <= last_md; curr_md++) {
	    if ((curr_md->mr_flags & MDF_ATTRMASK) == MDF_UNKNOWN)
		return (EACCES);
	}

    /* set flags, clear set-by-firmware flag */
    for (curr_md = first_md; curr_md <= last_md; curr_md++) {
	curr_md->mr_flags = mrcopyflags(curr_md->mr_flags & ~MDF_FIRMWARE, mrd->mr_flags);
	bcopy(mrd->mr_owner, curr_md->mr_owner, sizeof(mrd->mr_owner));
    }

    return(0);
}

/*
 * Modify/add a variable MTRR to satisfy the request.
 *
 * XXX needs to be updated to properly support "busy" ranges.
 */
static int
i686_mrsetvariable(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
{
    struct mem_range_desc	*curr_md, *free_md;
    int				i;

    /*
     * Scan the currently active variable descriptors, look for
     * one we exactly match (straight takeover) and for possible
     * accidental overlaps.
     * Keep track of the first empty variable descriptor in case we
     * can't perform a takeover.
     */
    i = (sc->mr_cap & MR686_FIXMTRR) ? MTRR_N64K + MTRR_N16K + MTRR_N4K : 0;
    curr_md = sc->mr_desc + i;
    free_md = NULL;
    for (; i < sc->mr_ndesc; i++, curr_md++) {
	if (curr_md->mr_flags & MDF_ACTIVE) {
	    /* exact match? */
	    if ((curr_md->mr_base == mrd->mr_base) &&
		(curr_md->mr_len == mrd->mr_len)) {
		/* whoops, owned by someone */
		if (curr_md->mr_flags & MDF_BUSY)
		    return(EBUSY);
		/* check we aren't doing something risky */
		if (!(mrd->mr_flags & MDF_FORCE) &&
		    ((curr_md->mr_flags & MDF_ATTRMASK) == MDF_UNKNOWN))
		    return (EACCES);
		/* Ok, just hijack this entry */
		free_md = curr_md;
		break;
	    }
	    /* non-exact overlap ? */
	    if (mroverlap(curr_md, mrd)) {
		/* between conflicting region types? */
		if (i686_mtrrconflict(curr_md->mr_flags, mrd->mr_flags))
		    return(EINVAL);
	    }
	} else if (free_md == NULL) {
	    free_md = curr_md;
	}
    }

    /* got somewhere to put it? */
    if (free_md != NULL) {
	/* Set up new descriptor */
	free_md->mr_base = mrd->mr_base;
	free_md->mr_len = mrd->mr_len;
	free_md->mr_flags = mrcopyflags(MDF_ACTIVE, mrd->mr_flags);
	bcopy(mrd->mr_owner, free_md->mr_owner, sizeof(mrd->mr_owner));
	return(0);
    }
    return(ENOSPC);
}

/*
 * Handle requests to set memory range attributes by manipulating MTRRs.
 */
static int
i686_mrset(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
{
    struct mem_range_desc	*targ;
    int				error = 0;

    switch(*arg) {
    case MEMRANGE_SET_UPDATE:
	/* make sure that what's being asked for is even possible at all */
	if (!mrvalid(mrd->mr_base, mrd->mr_len) ||
	    i686_mtrrtype(mrd->mr_flags) == -1)
	    return(EINVAL);

#define FIXTOP	((MTRR_N64K * 0x10000) + (MTRR_N16K * 0x4000) + (MTRR_N4K * 0x1000))
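
	/*
	 * With MTRR_N64K = 8, MTRR_N16K = 16 and MTRR_N4K = 64, FIXTOP
	 * works out to 0x100000: the fixed MTRRs cover exactly the
	 * first megabyte.
	 */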
	/* are the "low memory" conditions applicable? */
	if ((sc->mr_cap & MR686_FIXMTRR) &&
	    ((mrd->mr_base + mrd->mr_len) <= FIXTOP)) {
	    if ((error = i686_mrsetlow(sc, mrd, arg)) != 0)
		return(error);
	} else {
	    /* it's time to play with variable MTRRs */
	    if ((error = i686_mrsetvariable(sc, mrd, arg)) != 0)
		return(error);
	}
	break;

    case MEMRANGE_SET_REMOVE:
	if ((targ = mem_range_match(sc, mrd)) == NULL)
	    return(ENOENT);
	if (targ->mr_flags & MDF_FIXACTIVE)
	    return(EPERM);
	if (targ->mr_flags & MDF_BUSY)
	    return(EBUSY);
	targ->mr_flags &= ~MDF_ACTIVE;
	targ->mr_owner[0] = 0;
	break;

    default:
	return(EOPNOTSUPP);
    }

    /* update the hardware */
    i686_mrstore(sc);
    i686_mrfetch(sc);	/* refetch to see where we're at */
    return(0);
}

/*
 * Work out how many ranges we support, initialise storage for them,
 * fetch the initial settings.
 */
static void
i686_mrinit(struct mem_range_softc *sc)
{
    struct mem_range_desc	*mrd;
    int				nmdesc = 0;
    int				i;

    mtrrcap = rdmsr(MSR_MTRRcap);
    mtrrdef = rdmsr(MSR_MTRRdefType);

    /* For now, bail out if MTRRs are not enabled */
    if (!(mtrrdef & 0x800)) {
	if (bootverbose)
	    printf("CPU supports MTRRs but not enabled\n");
	return;
    }
    nmdesc = mtrrcap & 0xff;
    printf("Pentium Pro MTRR support enabled\n");

    /* If fixed MTRRs supported and enabled */
    if ((mtrrcap & 0x100) && (mtrrdef & 0x400)) {
	sc->mr_cap = MR686_FIXMTRR;
	nmdesc += MTRR_N64K + MTRR_N16K + MTRR_N4K;
    }

    sc->mr_desc =
	(struct mem_range_desc *)malloc(nmdesc * sizeof(struct mem_range_desc),
					M_MEMDESC, M_WAITOK);
    bzero(sc->mr_desc, nmdesc * sizeof(struct mem_range_desc));
    sc->mr_ndesc = nmdesc;

    mrd = sc->mr_desc;

    /* Populate the fixed MTRR entries' base/length */
    if (sc->mr_cap & MR686_FIXMTRR) {
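	/*
	 * Layout per the Intel spec: eight 64k ranges covering
	 * 0x00000-0x7ffff, sixteen 16k ranges covering 0x80000-0xbffff,
	 * and sixty-four 4k ranges covering 0xc0000-0xfffff.
	 */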
	for (i = 0; i < MTRR_N64K; i++, mrd++) {
	    mrd->mr_base = i * 0x10000;
	    mrd->mr_len = 0x10000;
	    mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN | MDF_FIXACTIVE;
	}
	for (i = 0; i < MTRR_N16K; i++, mrd++) {
	    mrd->mr_base = i * 0x4000 + 0x80000;
	    mrd->mr_len = 0x4000;
	    mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN | MDF_FIXACTIVE;
	}
	for (i = 0; i < MTRR_N4K; i++, mrd++) {
	    mrd->mr_base = i * 0x1000 + 0xc0000;
	    mrd->mr_len = 0x1000;
	    mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN | MDF_FIXACTIVE;
	}
    }

    /*
     * Get current settings, anything set now is considered to have
     * been set by the firmware. (XXX has something already played here?)
     */
    i686_mrfetch(sc);
    mrd = sc->mr_desc;
    for (i = 0; i < sc->mr_ndesc; i++, mrd++) {
	if (mrd->mr_flags & MDF_ACTIVE)
	    mrd->mr_flags |= MDF_FIRMWARE;
    }
}

/*
 * Initialise MTRRs on an AP after the BSP has run the init code.
 */
static void
i686_mrAPinit(struct mem_range_softc *sc)
{
    i686_mrstoreone((void *)sc);	/* set MTRRs to match BSP */
    wrmsr(MSR_MTRRdefType, mtrrdef);	/* set MTRR behaviour to match BSP */
}

static void
i686_mem_drvinit(void *unused)
{
    /* Try for i686 MTRRs */
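    /*
     * cpu_id bits 8-11 hold the CPU family: 0x600 accepts family 6
     * parts (Intel P6-class, AMD K7) and 0xf00 accepts family 0xf
     * parts (Pentium 4, AMD K8), all of which implement this MTRR
     * interface.
     */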
    if ((cpu_feature & CPUID_MTRR) &&
	((cpu_id & 0xf00) == 0x600 || (cpu_id & 0xf00) == 0xf00) &&
	((strcmp(cpu_vendor, "GenuineIntel") == 0) ||
	 (strcmp(cpu_vendor, "AuthenticAMD") == 0))) {
	mem_range_softc.mr_op = &i686_mrops;
    }
}

SYSINIT(i686memdev,SI_SUB_DRIVERS,SI_ORDER_FIRST,i686_mem_drvinit,NULL)
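
/*
 * Usage sketch (illustrative, not part of the original file): a driver
 * wanting write-combining on a frame buffer would go through the
 * generic memrange layer, which dispatches to i686_mrset() above. The
 * names fb_base and fb_size are hypothetical:
 *
 *	struct mem_range_desc mrd;
 *	int arg = MEMRANGE_SET_UPDATE;
 *
 *	mrd.mr_base = fb_base;		(must satisfy mrvalid())
 *	mrd.mr_len = fb_size;
 *	mrd.mr_flags = MDF_WRITECOMBINE;
 *	strcpy(mrd.mr_owner, "fb");
 *	error = mem_range_attr_set(&mrd, &arg);
 */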