/*-
 * Copyright (c) 1999 Michael Smith <msmith@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/i686_mem.c,v 1.8.2.4 2002/09/24 08:12:51 mdodd Exp $
 * $DragonFly: src/sys/i386/i386/Attic/i686_mem.c,v 1.4 2005/08/29 21:08:02 dillon Exp $
 */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/thread.h>

#include <machine/md_var.h>
#include <machine/specialreg.h>

#ifdef SMP
#include <machine/smp.h>
#endif
#include <machine/lock.h>
/*
 * i686 memory range operations
 *
 * This code will probably be impenetrable without reference to the
 * Intel Pentium Pro documentation.
 */

static char *mem_owner_bios = "BIOS";

#define MR686_FIXMTRR (1<<0)

#define mrwithin(mr, a) \
    (((a) >= (mr)->mr_base) && ((a) < ((mr)->mr_base + (mr)->mr_len)))
#define mroverlap(mra, mrb) \
    (mrwithin(mra, mrb->mr_base) || mrwithin(mrb, mra->mr_base))
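/*
 * Two ranges overlap exactly when the range with the higher base starts
 * inside the other, so checking each base against the other range via
 * mrwithin() catches both partial and complete overlaps.
 */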
#define mrvalid(base, len) \
    ((!(base & ((1 << 12) - 1))) &&  /* base is multiple of 4k */ \
     ((len) >= (1 << 12)) &&         /* length is >= 4k */ \
     powerof2((len)) &&              /* ... and power of two */ \
     !((base) & ((len) - 1)))        /* range is not discontiguous */
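/*
 * For example, base 0xd0000000 with len 0x100000 passes all four tests,
 * while len 0x180000 fails powerof2() and base 0xd0080000 with len
 * 0x100000 fails the final test: the base must be aligned to the
 * (power-of-two) length.
 */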
#define mrcopyflags(curr, new) (((curr) & ~MDF_ATTRMASK) | ((new) & MDF_ATTRMASK))

static void i686_mrinit(struct mem_range_softc *sc);
static int  i686_mrset(struct mem_range_softc *sc,
                       struct mem_range_desc *mrd,
                       int *arg);
static void i686_mrAPinit(struct mem_range_softc *sc);

static struct mem_range_ops i686_mrops = {
    i686_mrinit,
    i686_mrset,
    i686_mrAPinit
};

/* XXX for AP startup hook */
static u_int64_t mtrrcap, mtrrdef;

static struct mem_range_desc *mem_range_match(struct mem_range_softc *sc,
                                              struct mem_range_desc *mrd);
static void i686_mrfetch(struct mem_range_softc *sc);
static int  i686_mtrrtype(int flags);
static int  i686_mrt2mtrr(int flags, int oldval);
static int  i686_mtrrconflict(int flag1, int flag2);
static void i686_mrstore(struct mem_range_softc *sc);
static void i686_mrstoreone(void *arg);
#ifdef SMP
static void i686_mrstoreone_cpusync(struct lwkt_cpusync *cmd);
#endif
static struct mem_range_desc *i686_mtrrfixsearch(struct mem_range_softc *sc,
                                                 u_int64_t addr);
static int  i686_mrsetlow(struct mem_range_softc *sc,
                          struct mem_range_desc *mrd,
                          int *arg);
static int  i686_mrsetvariable(struct mem_range_softc *sc,
                               struct mem_range_desc *mrd,
                               int *arg);

/* i686 MTRR type to memory range type conversion */
static int i686_mtrrtomrt[] = {
    MDF_UNCACHEABLE,
    MDF_WRITECOMBINE,
    MDF_UNKNOWN,
    MDF_UNKNOWN,
    MDF_WRITETHROUGH,
    MDF_WRITEPROTECT,
    MDF_WRITEBACK
};

#define MTRRTOMRTLEN (sizeof(i686_mtrrtomrt) / sizeof(i686_mtrrtomrt[0]))
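/*
 * The table index is the Intel MTRR memory type encoding: 0 uncacheable,
 * 1 write-combining, 4 write-through, 5 write-protect, 6 write-back;
 * encodings 2 and 3 are reserved and map to MDF_UNKNOWN.
 */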
static int
i686_mtrr2mrt(int val)
{
    if (val < 0 || val >= MTRRTOMRTLEN)
        return MDF_UNKNOWN;
    return i686_mtrrtomrt[val];
}
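/*
 * Intel's precedence rules resolve an overlap between uncacheable and
 * writeback ranges in favour of uncacheable, so that combination is the
 * one overlap i686_mtrrconflict() below does not reject.
 */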
/*
 * i686 MTRR conflicts. Writeback and uncacheable may overlap.
 */
static int
i686_mtrrconflict(int flag1, int flag2)
{
    flag1 &= MDF_ATTRMASK;
    flag2 &= MDF_ATTRMASK;
    if (flag1 == flag2 ||
        (flag1 == MDF_WRITEBACK && flag2 == MDF_UNCACHEABLE) ||
        (flag2 == MDF_WRITEBACK && flag1 == MDF_UNCACHEABLE))
        return 0;
    return 1;
}
/*
 * Look for an exactly-matching range.
 */
static struct mem_range_desc *
mem_range_match(struct mem_range_softc *sc, struct mem_range_desc *mrd)
{
    struct mem_range_desc *cand;
    int i;

    for (i = 0, cand = sc->mr_desc; i < sc->mr_ndesc; i++, cand++)
        if ((cand->mr_base == mrd->mr_base) &&
            (cand->mr_len == mrd->mr_len))
            return(cand);
    return(NULL);
}
/*
 * Fetch the current mtrr settings from the current CPU (assumed to all
 * be in sync in the SMP case).  Note that if we are here, we assume
 * that MTRRs are enabled, and we may or may not have fixed MTRRs.
 */
static void
i686_mrfetch(struct mem_range_softc *sc)
{
    struct mem_range_desc *mrd;
    u_int64_t msrv;
    int i, j, msr;

    mrd = sc->mr_desc;

    /* Get fixed-range MTRRs */
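    /*
     * Each fixed-range MSR packs eight one-byte type fields, one per
     * consecutive sub-range, so each rdmsr() below supplies the types
     * for eight descriptors; the low byte is consumed and the value
     * shifted right by 8 on every inner-loop pass.
     */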
    if (sc->mr_cap & MR686_FIXMTRR) {
        msr = MSR_MTRR64kBase;
        for (i = 0; i < (MTRR_N64K / 8); i++, msr++) {
            msrv = rdmsr(msr);
            for (j = 0; j < 8; j++, mrd++) {
                mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
                    i686_mtrr2mrt(msrv & 0xff) |
                    MDF_ACTIVE;
                if (mrd->mr_owner[0] == 0)
                    strcpy(mrd->mr_owner, mem_owner_bios);
                msrv = msrv >> 8;
            }
        }
        msr = MSR_MTRR16kBase;
        for (i = 0; i < (MTRR_N16K / 8); i++, msr++) {
            msrv = rdmsr(msr);
            for (j = 0; j < 8; j++, mrd++) {
                mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
                    i686_mtrr2mrt(msrv & 0xff) |
                    MDF_ACTIVE;
                if (mrd->mr_owner[0] == 0)
                    strcpy(mrd->mr_owner, mem_owner_bios);
                msrv = msrv >> 8;
            }
        }
        msr = MSR_MTRR4kBase;
        for (i = 0; i < (MTRR_N4K / 8); i++, msr++) {
            msrv = rdmsr(msr);
            for (j = 0; j < 8; j++, mrd++) {
                mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
                    i686_mtrr2mrt(msrv & 0xff) |
                    MDF_ACTIVE;
                if (mrd->mr_owner[0] == 0)
                    strcpy(mrd->mr_owner, mem_owner_bios);
                msrv = msrv >> 8;
            }
        }
    }
    /* Get remainder which must be variable MTRRs */
    msr = MSR_MTRRVarBase;
    for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) {
        msrv = rdmsr(msr);
        mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
            i686_mtrr2mrt(msrv & 0xff);
        mrd->mr_base = msrv & 0x0000000ffffff000LL;
        msrv = rdmsr(msr + 1);
        mrd->mr_flags = (msrv & 0x800) ?
            (mrd->mr_flags | MDF_ACTIVE) :
            (mrd->mr_flags & ~MDF_ACTIVE);
        /* Compute the range from the mask. Ick. */
        mrd->mr_len = (~(msrv & 0x0000000ffffff000LL) & 0x0000000fffffffffLL) + 1;
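        /*
         * Example: for a 1MB range the mask register reads 0xffff00000;
         * ~0xffff00000 & 0xfffffffff = 0xfffff, and adding 1 yields the
         * length 0x100000.  The 36-bit constants reflect the P6 family's
         * 36-bit physical address space.
         */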
        if (!mrvalid(mrd->mr_base, mrd->mr_len))
            mrd->mr_flags |= MDF_BOGUS;
        /* If unclaimed and active, must be the BIOS */
        if ((mrd->mr_flags & MDF_ACTIVE) && (mrd->mr_owner[0] == 0))
            strcpy(mrd->mr_owner, mem_owner_bios);
    }
}
/*
 * Return the MTRR memory type matching a region's flags
 */
static int
i686_mtrrtype(int flags)
{
    int i;

    flags &= MDF_ATTRMASK;

    for (i = 0; i < MTRRTOMRTLEN; i++) {
        if (i686_mtrrtomrt[i] == MDF_UNKNOWN)
            continue;
        if (flags == i686_mtrrtomrt[i])
            return(i);
    }
    return(-1);
}
static int
i686_mrt2mtrr(int flags, int oldval)
{
    int val;

    if ((val = i686_mtrrtype(flags)) == -1)
        return oldval & 0xff;
    return val & 0xff;
}
/*
 * Update running CPU(s) MTRRs to match the ranges in the descriptor
 * list.
 *
 * XXX Must be called with interrupts enabled.
 */
static void
i686_mrstore(struct mem_range_softc *sc)
{
#ifdef SMP
    /*
     * We should use all_but_self_ipi() to call other CPUs into a
     * locking gate, then call a target function to do this work.
     * The "proper" solution involves a generalised locking gate
     * implementation, not ready yet.
     */
    lwkt_cpusync_simple(-1, i686_mrstoreone_cpusync, sc);
#else
    mpintr_lock();		/* doesn't have to be mpintr YYY */
    i686_mrstoreone((void *)sc);
    mpintr_unlock();
#endif
}
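/*
 * In the SMP case lwkt_cpusync_simple(-1, ...) pulls every CPU into the
 * rendezvous and runs the update on each of them; the MTRR contents must
 * be kept identical on all CPUs or memory type handling becomes
 * inconsistent.
 */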
#ifdef SMP

static void
i686_mrstoreone_cpusync(struct lwkt_cpusync *cmd)
{
    i686_mrstoreone(cmd->cs_data);
}

#endif
/*
 * Update the current CPU's MTRRs with those represented in the
 * descriptor list.  Note that we do this wholesale rather than
 * just stuffing one entry; this is simpler (but slower, of course).
 */
static void
i686_mrstoreone(void *arg)
{
    struct mem_range_softc *sc = (struct mem_range_softc *)arg;
    struct mem_range_desc *mrd;
    u_int64_t omsrv, msrv;
    int i, j, msr;
    u_int cr4save;

    mrd = sc->mr_desc;

    cr4save = rcr4();				/* save cr4 */
    if (cr4save & CR4_PGE)
        load_cr4(cr4save & ~CR4_PGE);
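    /*
     * Toggling CR4.PGE off (and back on via the restore below) flushes
     * the TLB including global pages, which the MTRR update sequence
     * requires.
     */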
    load_cr0((rcr0() & ~CR0_NW) | CR0_CD);	/* disable caches (CD = 1, NW = 0) */
    wbinvd();					/* flush caches, TLBs */
    wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) & ~0x800);	/* disable MTRRs (E = 0) */
    /* Set fixed-range MTRRs */
    if (sc->mr_cap & MR686_FIXMTRR) {
        msr = MSR_MTRR64kBase;
        for (i = 0; i < (MTRR_N64K / 8); i++, msr++) {
            msrv = 0;
            omsrv = rdmsr(msr);
            for (j = 7; j >= 0; j--) {
                msrv = msrv << 8;
                msrv |= i686_mrt2mtrr((mrd + j)->mr_flags, omsrv >> (j*8));
            }
            wrmsr(msr, msrv);
            mrd += 8;
        }
        msr = MSR_MTRR16kBase;
        for (i = 0; i < (MTRR_N16K / 8); i++, msr++) {
            msrv = 0;
            omsrv = rdmsr(msr);
            for (j = 7; j >= 0; j--) {
                msrv = msrv << 8;
                msrv |= i686_mrt2mtrr((mrd + j)->mr_flags, omsrv >> (j*8));
            }
            wrmsr(msr, msrv);
            mrd += 8;
        }
        msr = MSR_MTRR4kBase;
        for (i = 0; i < (MTRR_N4K / 8); i++, msr++) {
            msrv = 0;
            omsrv = rdmsr(msr);
            for (j = 7; j >= 0; j--) {
                msrv = msrv << 8;
                msrv |= i686_mrt2mtrr((mrd + j)->mr_flags, omsrv >> (j*8));
            }
            wrmsr(msr, msrv);
            mrd += 8;
        }
    }
    /* Set remainder which must be variable MTRRs */
    msr = MSR_MTRRVarBase;
    for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) {
        /* base/type register */
        omsrv = rdmsr(msr);
        if (mrd->mr_flags & MDF_ACTIVE) {
            msrv = mrd->mr_base & 0x0000000ffffff000LL;
            msrv |= i686_mrt2mtrr(mrd->mr_flags, omsrv);
        } else {
            msrv = 0;
        }
        wrmsr(msr, msrv);

        /* mask/active register */
        if (mrd->mr_flags & MDF_ACTIVE) {
            msrv = 0x800 | (~(mrd->mr_len - 1) & 0x0000000ffffff000LL);
        } else {
            msrv = 0;
        }
        wrmsr(msr + 1, msrv);
    }
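    /*
     * The mask written above is the inverse of the length computation in
     * i686_mrfetch(): for mr_len 0x100000, ~(0x100000 - 1) masked to
     * bits 12-35 gives 0xffff00000, and 0x800 is the valid (V) bit.
     */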
    wbinvd();							/* flush caches, TLBs */
    wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) | 0x800);	/* restore MTRR state */
    load_cr0(rcr0() & ~(CR0_CD | CR0_NW));			/* enable caches CD = 0 and NW = 0 */
    load_cr4(cr4save);						/* restore cr4 */
}
/*
 * Hunt for the fixed MTRR referencing (addr)
 */
static struct mem_range_desc *
i686_mtrrfixsearch(struct mem_range_softc *sc, u_int64_t addr)
{
    struct mem_range_desc *mrd;
    int i;

    for (i = 0, mrd = sc->mr_desc; i < (MTRR_N64K + MTRR_N16K + MTRR_N4K); i++, mrd++)
        if ((addr >= mrd->mr_base) && (addr < (mrd->mr_base + mrd->mr_len)))
            return(mrd);
    return(NULL);
}
/*
 * Try to satisfy the given range request by manipulating the fixed MTRRs that
 * cover low memory.
 *
 * Note that we try to be generous here; we'll bloat the range out to the
 * next higher/lower boundary to avoid the consumer having to know too much
 * about the mechanisms here.
 *
 * XXX note that this will have to be updated when we start supporting "busy" ranges.
 */
static int
i686_mrsetlow(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
{
    struct mem_range_desc *first_md, *last_md, *curr_md;

    /* range check */
    if (((first_md = i686_mtrrfixsearch(sc, mrd->mr_base)) == NULL) ||
        ((last_md = i686_mtrrfixsearch(sc, mrd->mr_base + mrd->mr_len - 1)) == NULL))
        return(EINVAL);

    /* check we aren't doing something risky */
    if (!(mrd->mr_flags & MDF_FORCE))
        for (curr_md = first_md; curr_md <= last_md; curr_md++) {
            if ((curr_md->mr_flags & MDF_ATTRMASK) == MDF_UNKNOWN)
                return (EACCES);
        }

    /* set flags, clear set-by-firmware flag */
    for (curr_md = first_md; curr_md <= last_md; curr_md++) {
        curr_md->mr_flags = mrcopyflags(curr_md->mr_flags & ~MDF_FIRMWARE, mrd->mr_flags);
        bcopy(mrd->mr_owner, curr_md->mr_owner, sizeof(mrd->mr_owner));
    }

    return(0);
}
/*
 * Modify/add a variable MTRR to satisfy the request.
 *
 * XXX needs to be updated to properly support "busy" ranges.
 */
static int
i686_mrsetvariable(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
{
    struct mem_range_desc *curr_md, *free_md;
    int i;

    /*
     * Scan the currently active variable descriptors, look for
     * one we exactly match (straight takeover) and for possible
     * accidental overlaps.
     * Keep track of the first empty variable descriptor in case we
     * can't perform a takeover.
     */
    i = (sc->mr_cap & MR686_FIXMTRR) ? MTRR_N64K + MTRR_N16K + MTRR_N4K : 0;
    curr_md = sc->mr_desc + i;
    free_md = NULL;
    for (; i < sc->mr_ndesc; i++, curr_md++) {
        if (curr_md->mr_flags & MDF_ACTIVE) {
            /* exact match? */
            if ((curr_md->mr_base == mrd->mr_base) &&
                (curr_md->mr_len == mrd->mr_len)) {
                /* whoops, owned by someone */
                if (curr_md->mr_flags & MDF_BUSY)
                    return(EBUSY);
                /* check we aren't doing something risky */
                if (!(mrd->mr_flags & MDF_FORCE) &&
                    ((curr_md->mr_flags & MDF_ATTRMASK) == MDF_UNKNOWN))
                    return (EACCES);
                /* Ok, just hijack this entry */
                free_md = curr_md;
                break;
            }
            /* non-exact overlap? */
            if (mroverlap(curr_md, mrd)) {
                /* between conflicting region types? */
                if (i686_mtrrconflict(curr_md->mr_flags, mrd->mr_flags))
                    return(EINVAL);
            }
        } else if (free_md == NULL) {
            free_md = curr_md;
        }
    }

    /* got somewhere to put it? */
    if (free_md == NULL)
        return(ENOSPC);

    /* Set up new descriptor */
    free_md->mr_base = mrd->mr_base;
    free_md->mr_len = mrd->mr_len;
    free_md->mr_flags = mrcopyflags(MDF_ACTIVE, mrd->mr_flags);
    bcopy(mrd->mr_owner, free_md->mr_owner, sizeof(mrd->mr_owner));
    return(0);
}
/*
 * Handle requests to set memory range attributes by manipulating MTRRs.
 */
static int
i686_mrset(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
{
    struct mem_range_desc *targ;
    int error = 0;

    switch(*arg) {
    case MEMRANGE_SET_UPDATE:
        /* make sure that what's being asked for is even possible at all */
        if (!mrvalid(mrd->mr_base, mrd->mr_len) ||
            i686_mtrrtype(mrd->mr_flags) == -1)
            return(EINVAL);

#define FIXTOP ((MTRR_N64K * 0x10000) + (MTRR_N16K * 0x4000) + (MTRR_N4K * 0x1000))
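        /*
         * With MTRR_N64K = 8, MTRR_N16K = 16 and MTRR_N4K = 64, FIXTOP
         * works out to 0x100000: the fixed MTRRs cover exactly the first
         * megabyte of physical memory.
         */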
        /* are the "low memory" conditions applicable? */
        if ((sc->mr_cap & MR686_FIXMTRR) &&
            ((mrd->mr_base + mrd->mr_len) <= FIXTOP)) {
            if ((error = i686_mrsetlow(sc, mrd, arg)) != 0)
                return(error);
        } else {
            /* it's time to play with variable MTRRs */
            if ((error = i686_mrsetvariable(sc, mrd, arg)) != 0)
                return(error);
        }
        break;

    case MEMRANGE_SET_REMOVE:
        if ((targ = mem_range_match(sc, mrd)) == NULL)
            return(ENOENT);
        if (targ->mr_flags & MDF_FIXACTIVE)
            return(EPERM);
        if (targ->mr_flags & MDF_BUSY)
            return(EBUSY);
        targ->mr_flags &= ~MDF_ACTIVE;
        targ->mr_owner[0] = 0;
        break;

    default:
        return(EOPNOTSUPP);
    }

    /* update the hardware */
    i686_mrstore(sc);
    i686_mrfetch(sc);	/* refetch to see where we're at */
    return(0);
}
/*
 * Work out how many ranges we support, initialise storage for them,
 * fetch the initial settings.
 */
static void
i686_mrinit(struct mem_range_softc *sc)
{
    struct mem_range_desc *mrd;
    int nmdesc = 0;
    int i;

    mtrrcap = rdmsr(MSR_MTRRcap);
    mtrrdef = rdmsr(MSR_MTRRdefType);

    /* For now, bail out if MTRRs are not enabled */
    if (!(mtrrdef & 0x800)) {
        if (bootverbose)
            printf("CPU supports MTRRs but not enabled\n");
        return;
    }
    nmdesc = mtrrcap & 0xff;
    printf("Pentium Pro MTRR support enabled\n");

    /* If fixed MTRRs supported and enabled */
    if ((mtrrcap & 0x100) && (mtrrdef & 0x400)) {
        sc->mr_cap = MR686_FIXMTRR;
        nmdesc += MTRR_N64K + MTRR_N16K + MTRR_N4K;
    }

    sc->mr_desc =
        (struct mem_range_desc *)malloc(nmdesc * sizeof(struct mem_range_desc),
                                        M_MEMDESC, M_WAITOK);
    bzero(sc->mr_desc, nmdesc * sizeof(struct mem_range_desc));
    sc->mr_ndesc = nmdesc;
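    /*
     * Descriptor layout: when fixed MTRRs are in use the first
     * MTRR_N64K + MTRR_N16K + MTRR_N4K descriptors shadow the fixed
     * ranges and the remaining (MTRRcap & 0xff) descriptors shadow the
     * variable MTRRs, matching the walk order in i686_mrfetch() and
     * i686_mrstoreone().
     */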
    mrd = sc->mr_desc;

    /* Populate the fixed MTRR entries' base/length */
    if (sc->mr_cap & MR686_FIXMTRR) {
        for (i = 0; i < MTRR_N64K; i++, mrd++) {
            mrd->mr_base = i * 0x10000;
            mrd->mr_len = 0x10000;
            mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN | MDF_FIXACTIVE;
        }
        for (i = 0; i < MTRR_N16K; i++, mrd++) {
            mrd->mr_base = i * 0x4000 + 0x80000;
            mrd->mr_len = 0x4000;
            mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN | MDF_FIXACTIVE;
        }
        for (i = 0; i < MTRR_N4K; i++, mrd++) {
            mrd->mr_base = i * 0x1000 + 0xc0000;
            mrd->mr_len = 0x1000;
            mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN | MDF_FIXACTIVE;
        }
    }
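    /*
     * Together these tile the first megabyte: eight 64k ranges up to
     * 0x7ffff, sixteen 16k ranges up to 0xbffff, and sixty-four 4k
     * ranges up to 0xfffff.
     */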
    /*
     * Get current settings, anything set now is considered to have
     * been set by the firmware. (XXX has something already played here?)
     */
    i686_mrfetch(sc);
    mrd = sc->mr_desc;
    for (i = 0; i < sc->mr_ndesc; i++, mrd++) {
        if (mrd->mr_flags & MDF_ACTIVE)
            mrd->mr_flags |= MDF_FIRMWARE;
    }
}
/*
 * Initialise MTRRs on an AP after the BSP has run the init code.
 */
static void
i686_mrAPinit(struct mem_range_softc *sc)
{
    i686_mrstoreone((void *)sc);	/* set MTRRs to match BSP */
    wrmsr(MSR_MTRRdefType, mtrrdef);	/* set MTRR behaviour to match BSP */
}
static void
i686_mem_drvinit(void *unused)
{
    /* Try for i686 MTRRs */
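    /*
     * (cpu_id & 0xf00) is the CPU family: 0x600 matches family 6
     * (P6-class) parts and 0xf00 matches family 15 (Pentium 4-class)
     * parts, both of which implement this MTRR model.
     */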
    if ((cpu_feature & CPUID_MTRR) &&
        ((cpu_id & 0xf00) == 0x600 || (cpu_id & 0xf00) == 0xf00) &&
        ((strcmp(cpu_vendor, "GenuineIntel") == 0) ||
         (strcmp(cpu_vendor, "AuthenticAMD") == 0))) {
        mem_range_softc.mr_op = &i686_mrops;
    }
}

SYSINIT(i686memdev,SI_SUB_DRIVERS,SI_ORDER_FIRST,i686_mem_drvinit,NULL)