/*
 * Copyright (c) 1999 Michael Smith <msmith@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/i686_mem.c,v 1.8.2.4 2002/09/24 08:12:51 mdodd Exp $
 * $DragonFly: src/sys/platform/pc32/i386/i686_mem.c,v 1.6 2006/12/17 20:07:32 dillon Exp $
 */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/thread.h>

#include <sys/thread2.h>

#include <machine/md_var.h>
#include <machine/psl.h>
#include <machine/specialreg.h>

#include <machine/smp.h>

#include <machine/lock.h>

/*
 * i686 memory range operations
 *
 * This code will probably be impenetrable without reference to the
 * Intel Pentium Pro documentation.
 */

static char *mem_owner_bios = "BIOS";

#define MR686_FIXMTRR	(1<<0)

#define mrwithin(mr, a) \
    (((a) >= (mr)->mr_base) && ((a) < ((mr)->mr_base + (mr)->mr_len)))
#define mroverlap(mra, mrb) \
    (mrwithin(mra, mrb->mr_base) || mrwithin(mrb, mra->mr_base))

#define mrvalid(base, len) \
    ((!((base) & ((1 << 12) - 1))) &&	/* base is multiple of 4k */ \
     ((len) >= (1 << 12)) &&		/* length is >= 4k */ \
     powerof2((len)) &&			/* ... and power of two */ \
     !((base) & ((len) - 1)))		/* range is not discontinuous */
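
/*
 * Illustrative example (not from the original source): base 0xd0000 with
 * len 0x10000 passes mrvalid (4K-aligned, >= 4K, power of two, base aligned
 * to len), while len 0x18000 would fail the powerof2() test.
 */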

#define mrcopyflags(curr, new) (((curr) & ~MDF_ATTRMASK) | ((new) & MDF_ATTRMASK))

static void i686_mrinit(struct mem_range_softc *sc);
static int  i686_mrset(struct mem_range_softc *sc,
		       struct mem_range_desc *mrd,
		       int *arg);
static void i686_mrAPinit(struct mem_range_softc *sc);

static struct mem_range_ops i686_mrops = {
    i686_mrinit,
    i686_mrset,
    i686_mrAPinit
};

/* XXX for AP startup hook */
static u_int64_t mtrrcap, mtrrdef;

static struct mem_range_desc *mem_range_match(struct mem_range_softc *sc,
					      struct mem_range_desc *mrd);
static void i686_mrfetch(struct mem_range_softc *sc);
static int  i686_mtrrtype(int flags);
static int  i686_mrt2mtrr(int flags, int oldval);
static int  i686_mtrrconflict(int flag1, int flag2);
static void i686_mrstore(struct mem_range_softc *sc);
static void i686_mrstoreone(void *arg);
static void i686_mrstoreone_cpusync(struct lwkt_cpusync *cmd);
static struct mem_range_desc *i686_mtrrfixsearch(struct mem_range_softc *sc,
						 u_int64_t addr);
static int  i686_mrsetlow(struct mem_range_softc *sc,
			  struct mem_range_desc *mrd,
			  int *arg);
static int  i686_mrsetvariable(struct mem_range_softc *sc,
			       struct mem_range_desc *mrd,
			       int *arg);

/* i686 MTRR type to memory range type conversion */
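/*
 * The table index is the hardware MTRR memory type as defined in the Intel
 * documentation: 0 = uncacheable, 1 = write-combining, 2 and 3 = reserved,
 * 4 = write-through, 5 = write-protect, 6 = write-back.
 */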
static int i686_mtrrtomrt[] = {
    MDF_UNCACHEABLE,
    MDF_WRITECOMBINE,
    MDF_UNKNOWN,
    MDF_UNKNOWN,
    MDF_WRITETHROUGH,
    MDF_WRITEPROTECT,
    MDF_WRITEBACK
};

#define MTRRTOMRTLEN (sizeof(i686_mtrrtomrt) / sizeof(i686_mtrrtomrt[0]))

static int
i686_mtrr2mrt(int val) {
    if (val < 0 || val >= MTRRTOMRTLEN)
	return MDF_UNKNOWN;
    return i686_mtrrtomrt[val];
}

/*
 * i686 MTRR conflicts. Writeback and uncacheable may overlap.
 */
static int
i686_mtrrconflict(int flag1, int flag2) {
    flag1 &= MDF_ATTRMASK;
    flag2 &= MDF_ATTRMASK;
    if (flag1 == flag2 ||
	(flag1 == MDF_WRITEBACK && flag2 == MDF_UNCACHEABLE) ||
	(flag2 == MDF_WRITEBACK && flag1 == MDF_UNCACHEABLE))
	return 0;
    return 1;
}
142 * Look for an exactly-matching range.
144 static struct mem_range_desc *
145 mem_range_match(struct mem_range_softc *sc, struct mem_range_desc *mrd)
147 struct mem_range_desc *cand;
150 for (i = 0, cand = sc->mr_desc; i < sc->mr_ndesc; i++, cand++)
151 if ((cand->mr_base == mrd->mr_base) &&
152 (cand->mr_len == mrd->mr_len))

/*
 * Fetch the current mtrr settings from the current CPU (assumed to all
 * be in sync in the SMP case).  Note that if we are here, we assume
 * that MTRRs are enabled, and we may or may not have fixed MTRRs.
 */
static void
i686_mrfetch(struct mem_range_softc *sc)
{
    struct mem_range_desc *mrd;
    u_int64_t msrv;
    int i, j, msr;

    mrd = sc->mr_desc;

    /* Get fixed-range MTRRs */
    if (sc->mr_cap & MR686_FIXMTRR) {
	msr = MSR_MTRR64kBase;
	for (i = 0; i < (MTRR_N64K / 8); i++, msr++) {
	    msrv = rdmsr(msr);
	    for (j = 0; j < 8; j++, mrd++) {
		mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
		    i686_mtrr2mrt(msrv & 0xff) |
		    MDF_ACTIVE;
		if (mrd->mr_owner[0] == 0)
		    strcpy(mrd->mr_owner, mem_owner_bios);
		msrv = msrv >> 8;
	    }
	}
	msr = MSR_MTRR16kBase;
	for (i = 0; i < (MTRR_N16K / 8); i++, msr++) {
	    msrv = rdmsr(msr);
	    for (j = 0; j < 8; j++, mrd++) {
		mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
		    i686_mtrr2mrt(msrv & 0xff) |
		    MDF_ACTIVE;
		if (mrd->mr_owner[0] == 0)
		    strcpy(mrd->mr_owner, mem_owner_bios);
		msrv = msrv >> 8;
	    }
	}
	msr = MSR_MTRR4kBase;
	for (i = 0; i < (MTRR_N4K / 8); i++, msr++) {
	    msrv = rdmsr(msr);
	    for (j = 0; j < 8; j++, mrd++) {
		mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
		    i686_mtrr2mrt(msrv & 0xff) |
		    MDF_ACTIVE;
		if (mrd->mr_owner[0] == 0)
		    strcpy(mrd->mr_owner, mem_owner_bios);
		msrv = msrv >> 8;
	    }
	}
    }

    /* Get remainder which must be variable MTRRs */
    msr = MSR_MTRRVarBase;
    for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) {
	msrv = rdmsr(msr);
	mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
	    i686_mtrr2mrt(msrv & 0xff);
	mrd->mr_base = msrv & 0x0000000ffffff000LL;
	msrv = rdmsr(msr + 1);
	mrd->mr_flags = (msrv & 0x800) ?
	    (mrd->mr_flags | MDF_ACTIVE) :
	    (mrd->mr_flags & ~MDF_ACTIVE);
	/* Compute the range from the mask. Ick. */
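	/*
	 * Illustrative example (assumes 36-bit physical addressing, as the
	 * masks here do): a mask of 0x0000000ff0000000 fixes bits 35:28, so
	 * (~mask & 0xfffffffff) + 1 = 0x10000000, i.e. a 256MB range.
	 */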
	mrd->mr_len = (~(msrv & 0x0000000ffffff000LL) & 0x0000000fffffffffLL) + 1;
	if (!mrvalid(mrd->mr_base, mrd->mr_len))
	    mrd->mr_flags |= MDF_BOGUS;
	/* If unclaimed and active, must be the BIOS */
	if ((mrd->mr_flags & MDF_ACTIVE) && (mrd->mr_owner[0] == 0))
	    strcpy(mrd->mr_owner, mem_owner_bios);
    }
}

/*
 * Return the MTRR memory type matching a region's flags
 */
static int
i686_mtrrtype(int flags)
{
    int i;

    flags &= MDF_ATTRMASK;

    for (i = 0; i < MTRRTOMRTLEN; i++) {
	if (i686_mtrrtomrt[i] == MDF_UNKNOWN)
	    continue;
	if (flags == i686_mtrrtomrt[i])
	    return(i);
    }
    return(-1);
}

static int
i686_mrt2mtrr(int flags, int oldval)
{
    int val;

    if ((val = i686_mtrrtype(flags)) == -1)
	return oldval & 0xff;
    return val & 0xff;
}

/*
 * Update running CPU(s) MTRRs to match the ranges in the descriptor list.
 *
 * XXX Must be called with interrupts enabled.
 */
static void
i686_mrstore(struct mem_range_softc *sc)
{
#ifdef SMP
    /*
     * We should use all_but_self_ipi() to call other CPUs into a
     * locking gate, then call a target function to do this work.
     * The "proper" solution involves a generalised locking gate
     * implementation, not ready yet.
     */
    lwkt_cpusync_simple(-1, i686_mrstoreone_cpusync, sc);
#else
    mpintr_lock();	/* doesn't have to be mpintr YYY */
    i686_mrstoreone((void *)sc);
    mpintr_unlock();
#endif
}

static void
i686_mrstoreone_cpusync(struct lwkt_cpusync *cmd)
{
    i686_mrstoreone(cmd->cs_data);
}

/*
 * Update the current CPU's MTRRs with those represented in the
 * descriptor list.  Note that we do this wholesale rather than
 * just stuffing one entry; this is simpler (but slower, of course).
 */
static void
i686_mrstoreone(void *arg)
{
    struct mem_range_softc *sc = (struct mem_range_softc *)arg;
    struct mem_range_desc *mrd;
    u_int64_t omsrv, msrv;
    int i, j, msr;
    u_int cr4save;

    mrd = sc->mr_desc;

    cr4save = rcr4();				/* save cr4 */
    if (cr4save & CR4_PGE)
	load_cr4(cr4save & ~CR4_PGE);
    load_cr0((rcr0() & ~CR0_NW) | CR0_CD);	/* disable caches (CD = 1, NW = 0) */
    wbinvd();					/* flush caches, TLBs */
    wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) & ~0x800);	/* disable MTRRs (E = 0) */

    /* Set fixed-range MTRRs */
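    /*
     * Each fixed-range MTRR MSR packs eight one-byte memory-type fields,
     * one per consecutive sub-range, so each descriptor contributes a
     * single byte to the value written below.
     */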
    if (sc->mr_cap & MR686_FIXMTRR) {
	msr = MSR_MTRR64kBase;
	for (i = 0; i < (MTRR_N64K / 8); i++, msr++) {
	    msrv = 0;
	    omsrv = rdmsr(msr);
	    for (j = 7; j >= 0; j--) {
		msrv = msrv << 8;
		msrv |= i686_mrt2mtrr((mrd + j)->mr_flags, omsrv >> (j*8));
	    }
	    wrmsr(msr, msrv);
	    mrd += 8;
	}
	msr = MSR_MTRR16kBase;
	for (i = 0; i < (MTRR_N16K / 8); i++, msr++) {
	    msrv = 0;
	    omsrv = rdmsr(msr);
	    for (j = 7; j >= 0; j--) {
		msrv = msrv << 8;
		msrv |= i686_mrt2mtrr((mrd + j)->mr_flags, omsrv >> (j*8));
	    }
	    wrmsr(msr, msrv);
	    mrd += 8;
	}
	msr = MSR_MTRR4kBase;
	for (i = 0; i < (MTRR_N4K / 8); i++, msr++) {
	    msrv = 0;
	    omsrv = rdmsr(msr);
	    for (j = 7; j >= 0; j--) {
		msrv = msrv << 8;
		msrv |= i686_mrt2mtrr((mrd + j)->mr_flags, omsrv >> (j*8));
	    }
	    wrmsr(msr, msrv);
	    mrd += 8;
	}
    }

    /* Set remainder which must be variable MTRRs */
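    /*
     * Variable MTRRs live in base/mask MSR pairs starting at MSR_MTRRVarBase;
     * bit 11 (0x800) of the mask register is the "valid" bit that activates
     * an entry.
     */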
    msr = MSR_MTRRVarBase;
    for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) {
	/* base/type register */
	omsrv = rdmsr(msr);
	if (mrd->mr_flags & MDF_ACTIVE) {
	    msrv = mrd->mr_base & 0x0000000ffffff000LL;
	    msrv |= i686_mrt2mtrr(mrd->mr_flags, omsrv);
	} else {
	    msrv = 0;
	}
	wrmsr(msr, msrv);

	/* mask/active register */
	if (mrd->mr_flags & MDF_ACTIVE) {
	    msrv = 0x800 | (~(mrd->mr_len - 1) & 0x0000000ffffff000LL);
	} else {
	    msrv = 0;
	}
	wrmsr(msr + 1, msrv);
    }
    wbinvd();							/* flush caches, TLBs */
    wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) | 0x800);	/* restore MTRR state */
    load_cr0(rcr0() & ~(CR0_CD | CR0_NW));			/* enable caches CD = 0 and NW = 0 */
    load_cr4(cr4save);						/* restore cr4 */
}
383 * Hunt for the fixed MTRR referencing (addr)
385 static struct mem_range_desc *
386 i686_mtrrfixsearch(struct mem_range_softc *sc, u_int64_t addr)
388 struct mem_range_desc *mrd;
391 for (i = 0, mrd = sc->mr_desc; i < (MTRR_N64K + MTRR_N16K + MTRR_N4K); i++, mrd++)
392 if ((addr >= mrd->mr_base) && (addr < (mrd->mr_base + mrd->mr_len)))

/*
 * Try to satisfy the given range request by manipulating the fixed MTRRs that
 * cover low memory.
 *
 * Note that we try to be generous here; we'll bloat the range out to the
 * next higher/lower boundary to avoid the consumer having to know too much
 * about the mechanisms here.
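 *
 * (For example, a 4K request at 0x91000 falls within the 16K fixed range
 * starting at 0x90000, so that entire 16K range receives the new attribute.)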
 *
 * XXX note that this will have to be updated when we start supporting "busy" ranges.
 */
static int
i686_mrsetlow(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
{
    struct mem_range_desc *first_md, *last_md, *curr_md;

    /* range check */
    if (((first_md = i686_mtrrfixsearch(sc, mrd->mr_base)) == NULL) ||
	((last_md = i686_mtrrfixsearch(sc, mrd->mr_base + mrd->mr_len - 1)) == NULL))
	return(EINVAL);

    /* check we aren't doing something risky */
    if (!(mrd->mr_flags & MDF_FORCE))
	for (curr_md = first_md; curr_md <= last_md; curr_md++) {
	    if ((curr_md->mr_flags & MDF_ATTRMASK) == MDF_UNKNOWN)
		return (EACCES);
	}

    /* set flags, clear set-by-firmware flag */
    for (curr_md = first_md; curr_md <= last_md; curr_md++) {
	curr_md->mr_flags = mrcopyflags(curr_md->mr_flags & ~MDF_FIRMWARE, mrd->mr_flags);
	bcopy(mrd->mr_owner, curr_md->mr_owner, sizeof(mrd->mr_owner));
    }

    return(0);
}

/*
 * Modify/add a variable MTRR to satisfy the request.
 *
 * XXX needs to be updated to properly support "busy" ranges.
 */
static int
i686_mrsetvariable(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
{
    struct mem_range_desc *curr_md, *free_md;
    int i;

    /*
     * Scan the currently active variable descriptors, look for
     * one we exactly match (straight takeover) and for possible
     * accidental overlaps.
     * Keep track of the first empty variable descriptor in case we
     * can't perform a takeover.
     */
    i = (sc->mr_cap & MR686_FIXMTRR) ? MTRR_N64K + MTRR_N16K + MTRR_N4K : 0;
    curr_md = sc->mr_desc + i;
    free_md = NULL;
    for (; i < sc->mr_ndesc; i++, curr_md++) {
	if (curr_md->mr_flags & MDF_ACTIVE) {
	    /* exact match? */
	    if ((curr_md->mr_base == mrd->mr_base) &&
		(curr_md->mr_len == mrd->mr_len)) {
		/* whoops, owned by someone */
		if (curr_md->mr_flags & MDF_BUSY)
		    return(EBUSY);
		/* check we aren't doing something risky */
		if (!(mrd->mr_flags & MDF_FORCE) &&
		    ((curr_md->mr_flags & MDF_ATTRMASK) == MDF_UNKNOWN))
		    return (EACCES);
		/* Ok, just hijack this entry */
		free_md = curr_md;
		break;
	    }
	    /* non-exact overlap ? */
	    if (mroverlap(curr_md, mrd)) {
		/* between conflicting region types? */
		if (i686_mtrrconflict(curr_md->mr_flags, mrd->mr_flags))
		    return(EINVAL);
	    }
	} else if (free_md == NULL) {
	    free_md = curr_md;
	}
    }

    /* got somewhere to put it? */
    if (free_md == NULL)
	return(ENOSPC);

    /* Set up new descriptor */
    free_md->mr_base = mrd->mr_base;
    free_md->mr_len = mrd->mr_len;
    free_md->mr_flags = mrcopyflags(MDF_ACTIVE, mrd->mr_flags);
    bcopy(mrd->mr_owner, free_md->mr_owner, sizeof(mrd->mr_owner));
    return(0);
}

/*
 * Handle requests to set memory range attributes by manipulating MTRRs.
 */
static int
i686_mrset(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
{
    struct mem_range_desc *targ;
    int error = 0;

    switch (*arg) {
    case MEMRANGE_SET_UPDATE:
	/* make sure that what's being asked for is even possible at all */
	if (!mrvalid(mrd->mr_base, mrd->mr_len) ||
	    i686_mtrrtype(mrd->mr_flags) == -1)
	    return(EINVAL);

#define FIXTOP	((MTRR_N64K * 0x10000) + (MTRR_N16K * 0x4000) + (MTRR_N4K * 0x1000))
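
	/*
	 * With the standard fixed-MTRR counts (MTRR_N64K = 8, MTRR_N16K = 16,
	 * MTRR_N4K = 64) FIXTOP works out to 0x100000, i.e. the first 1MB.
	 */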

	/* are the "low memory" conditions applicable? */
	if ((sc->mr_cap & MR686_FIXMTRR) &&
	    ((mrd->mr_base + mrd->mr_len) <= FIXTOP)) {
	    if ((error = i686_mrsetlow(sc, mrd, arg)) != 0)
		return(error);
	} else {
	    /* it's time to play with variable MTRRs */
	    if ((error = i686_mrsetvariable(sc, mrd, arg)) != 0)
		return(error);
	}
	break;

    case MEMRANGE_SET_REMOVE:
	if ((targ = mem_range_match(sc, mrd)) == NULL)
	    return(ENOENT);
	if (targ->mr_flags & MDF_FIXACTIVE)
	    return(EPERM);
	if (targ->mr_flags & MDF_BUSY)
	    return(EBUSY);
	targ->mr_flags &= ~MDF_ACTIVE;
	targ->mr_owner[0] = 0;
	break;

    default:
	return(EOPNOTSUPP);
    }

    /* update the hardware */
    i686_mrstore(sc);
    i686_mrfetch(sc);	/* refetch to see where we're at */
    return(0);
}

/*
 * Work out how many ranges we support, initialise storage for them,
 * fetch the initial settings.
 */
static void
i686_mrinit(struct mem_range_softc *sc)
{
    struct mem_range_desc *mrd;
    int nmdesc = 0;
    int i;

    mtrrcap = rdmsr(MSR_MTRRcap);
    mtrrdef = rdmsr(MSR_MTRRdefType);

    /* For now, bail out if MTRRs are not enabled */
    if (!(mtrrdef & 0x800)) {
	if (bootverbose)
	    printf("CPU supports MTRRs but not enabled\n");
	return;
    }
    nmdesc = mtrrcap & 0xff;
    printf("Pentium Pro MTRR support enabled\n");

    /* If fixed MTRRs supported and enabled */
    if ((mtrrcap & 0x100) && (mtrrdef & 0x400)) {
	sc->mr_cap = MR686_FIXMTRR;
	nmdesc += MTRR_N64K + MTRR_N16K + MTRR_N4K;
    }
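
    /*
     * nmdesc is now the variable-range count from the low byte of MTRRcap
     * plus, when fixed MTRRs are in use, one descriptor per fixed sub-range
     * (88 in total with the standard 8/16/64 layout).
     */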
    sc->mr_desc =
	(struct mem_range_desc *)kmalloc(nmdesc * sizeof(struct mem_range_desc),
					 M_MEMDESC, M_WAITOK);
    bzero(sc->mr_desc, nmdesc * sizeof(struct mem_range_desc));
    sc->mr_ndesc = nmdesc;

    mrd = sc->mr_desc;

    /* Populate the fixed MTRR entries' base/length */
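    /*
     * Fixed layout: 0-512K in 64K steps, 512K-768K in 16K steps and
     * 768K-1M in 4K steps, matching the hardware's fixed-range MTRRs.
     */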
    if (sc->mr_cap & MR686_FIXMTRR) {
	for (i = 0; i < MTRR_N64K; i++, mrd++) {
	    mrd->mr_base = i * 0x10000;
	    mrd->mr_len = 0x10000;
	    mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN | MDF_FIXACTIVE;
	}
	for (i = 0; i < MTRR_N16K; i++, mrd++) {
	    mrd->mr_base = i * 0x4000 + 0x80000;
	    mrd->mr_len = 0x4000;
	    mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN | MDF_FIXACTIVE;
	}
	for (i = 0; i < MTRR_N4K; i++, mrd++) {
	    mrd->mr_base = i * 0x1000 + 0xc0000;
	    mrd->mr_len = 0x1000;
	    mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN | MDF_FIXACTIVE;
	}
    }

    /*
     * Get current settings, anything set now is considered to have
     * been set by the firmware. (XXX has something already played here?)
     */
    i686_mrfetch(sc);
    mrd = sc->mr_desc;
    for (i = 0; i < sc->mr_ndesc; i++, mrd++) {
	if (mrd->mr_flags & MDF_ACTIVE)
	    mrd->mr_flags |= MDF_FIRMWARE;
    }
}

/*
 * Initialise MTRRs on an AP after the BSP has run the init code.
 */
static void
i686_mrAPinit(struct mem_range_softc *sc)
{
    i686_mrstoreone((void *)sc);	/* set MTRRs to match BSP */
    wrmsr(MSR_MTRRdefType, mtrrdef);	/* set MTRR behaviour to match BSP */
}

static void
i686_mem_drvinit(void *unused)
{
    /* Try for i686 MTRRs */
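    /*
     * The check below accepts family 6 (0x6xx) and family 0xF CPUs from
     * Intel or AMD, provided they advertise the MTRR feature via CPUID.
     */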
    if ((cpu_feature & CPUID_MTRR) &&
	((cpu_id & 0xf00) == 0x600 || (cpu_id & 0xf00) == 0xf00) &&
	((strcmp(cpu_vendor, "GenuineIntel") == 0) ||
	 (strcmp(cpu_vendor, "AuthenticAMD") == 0))) {
	mem_range_softc.mr_op = &i686_mrops;
    }
}

SYSINIT(i686memdev, SI_SUB_DRIVERS, SI_ORDER_FIRST, i686_mem_drvinit, NULL);