| 1 | /*- |
| 2 | * Copyright (c) 1999 Michael Smith <msmith@freebsd.org> |
| 3 | * All rights reserved. |
| 4 | * |
| 5 | * Redistribution and use in source and binary forms, with or without |
| 6 | * modification, are permitted provided that the following conditions |
| 7 | * are met: |
| 8 | * 1. Redistributions of source code must retain the above copyright |
| 9 | * notice, this list of conditions and the following disclaimer. |
| 10 | * 2. Redistributions in binary form must reproduce the above copyright |
| 11 | * notice, this list of conditions and the following disclaimer in the |
| 12 | * documentation and/or other materials provided with the distribution. |
| 13 | * |
| 14 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND |
| 15 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
| 16 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
| 17 | * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE |
| 18 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
| 19 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
| 20 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
| 21 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
| 22 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
| 23 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
| 24 | * SUCH DAMAGE. |
| 25 | * |
| 26 | * $FreeBSD: src/sys/i386/i386/i686_mem.c,v 1.8.2.4 2002/09/24 08:12:51 mdodd Exp $ |
| 27 | * $DragonFly: src/sys/platform/pc32/i386/i686_mem.c,v 1.3 2003/07/06 21:23:48 dillon Exp $ |
| 28 | */ |
| 29 | |
| 30 | #include <sys/param.h> |
| 31 | #include <sys/kernel.h> |
| 32 | #include <sys/systm.h> |
| 33 | #include <sys/malloc.h> |
| 34 | #include <sys/memrange.h> |
| 35 | |
| 36 | #include <machine/md_var.h> |
| 37 | #include <machine/specialreg.h> |
| 38 | |
| 39 | #ifdef SMP |
| 40 | #include <machine/smp.h> |
| 41 | #endif |
| 42 | #include <machine/lock.h> |
| 43 | |
| 44 | /* |
| 45 | * i686 memory range operations |
| 46 | * |
| 47 | * This code will probably be impenetrable without reference to the |
| 48 | * Intel Pentium Pro documentation. |
| 49 | */ |
| 50 | |
/* Owner string attached to ranges found already active at boot. */
static char *mem_owner_bios = "BIOS";

/* sc->mr_cap flag: fixed-range MTRRs are present and enabled. */
#define MR686_FIXMTRR (1<<0)

/* True if address (a) lies within range descriptor (mr). */
#define mrwithin(mr, a) \
    (((a) >= (mr)->mr_base) && ((a) < ((mr)->mr_base + (mr)->mr_len)))
/* True if range descriptors (mra) and (mrb) overlap at all. */
#define mroverlap(mra, mrb) \
    (mrwithin(mra, mrb->mr_base) || mrwithin(mrb, mra->mr_base))

/* Validity test for a variable MTRR range: 4k-aligned, power-of-two length. */
#define mrvalid(base, len) \
    ((!(base & ((1 << 12) - 1))) && /* base is multiple of 4k */ \
     ((len) >= (1 << 12)) && /* length is >= 4k */ \
     powerof2((len)) && /* ... and power of two */ \
     !((base) & ((len) - 1))) /* range is not discontinuous */

/* Merge the attribute bits of (new) into (curr), preserving curr's others. */
#define mrcopyflags(curr, new) (((curr) & ~MDF_ATTRMASK) | ((new) & MDF_ATTRMASK))

static void i686_mrinit(struct mem_range_softc *sc);
static int i686_mrset(struct mem_range_softc *sc,
    struct mem_range_desc *mrd,
    int *arg);
static void i686_mrAPinit(struct mem_range_softc *sc);

/* Operations vector installed into mem_range_softc.mr_op. */
static struct mem_range_ops i686_mrops = {
    i686_mrinit,
    i686_mrset,
    i686_mrAPinit
};

/* XXX for AP startup hook */
static u_int64_t mtrrcap, mtrrdef;

static struct mem_range_desc *mem_range_match(struct mem_range_softc *sc,
    struct mem_range_desc *mrd);
static void i686_mrfetch(struct mem_range_softc *sc);
static int i686_mtrrtype(int flags);
static int i686_mrt2mtrr(int flags, int oldval);
static int i686_mtrrconflict(int flag1, int flag2);
static void i686_mrstore(struct mem_range_softc *sc);
static void i686_mrstoreone(void *arg);
static struct mem_range_desc *i686_mtrrfixsearch(struct mem_range_softc *sc,
    u_int64_t addr);
static int i686_mrsetlow(struct mem_range_softc *sc,
    struct mem_range_desc *mrd,
    int *arg);
static int i686_mrsetvariable(struct mem_range_softc *sc,
    struct mem_range_desc *mrd,
    int *arg);

/* i686 MTRR type to memory range type conversion */
static int i686_mtrrtomrt[] = {
    MDF_UNCACHEABLE,		/* type 0 */
    MDF_WRITECOMBINE,		/* type 1 */
    MDF_UNKNOWN,		/* type 2: reserved */
    MDF_UNKNOWN,		/* type 3: reserved */
    MDF_WRITETHROUGH,		/* type 4 */
    MDF_WRITEPROTECT,		/* type 5 */
    MDF_WRITEBACK		/* type 6 */
};
| 112 | |
| 113 | static int |
| 114 | i686_mtrr2mrt(int val) { |
| 115 | if (val < 0 || val >= MTRRTOMRTLEN) |
| 116 | return MDF_UNKNOWN; |
| 117 | return i686_mtrrtomrt[val]; |
| 118 | } |
| 119 | |
| 120 | /* |
| 121 | * i686 MTRR conflicts. Writeback and uncachable may overlap. |
| 122 | */ |
| 123 | static int |
| 124 | i686_mtrrconflict(int flag1, int flag2) { |
| 125 | flag1 &= MDF_ATTRMASK; |
| 126 | flag2 &= MDF_ATTRMASK; |
| 127 | if (flag1 == flag2 || |
| 128 | (flag1 == MDF_WRITEBACK && flag2 == MDF_UNCACHEABLE) || |
| 129 | (flag2 == MDF_WRITEBACK && flag1 == MDF_UNCACHEABLE)) |
| 130 | return 0; |
| 131 | return 1; |
| 132 | } |
| 133 | |
| 134 | /* |
| 135 | * Look for an exactly-matching range. |
| 136 | */ |
| 137 | static struct mem_range_desc * |
| 138 | mem_range_match(struct mem_range_softc *sc, struct mem_range_desc *mrd) |
| 139 | { |
| 140 | struct mem_range_desc *cand; |
| 141 | int i; |
| 142 | |
| 143 | for (i = 0, cand = sc->mr_desc; i < sc->mr_ndesc; i++, cand++) |
| 144 | if ((cand->mr_base == mrd->mr_base) && |
| 145 | (cand->mr_len == mrd->mr_len)) |
| 146 | return(cand); |
| 147 | return(NULL); |
| 148 | } |
| 149 | |
| 150 | /* |
| 151 | * Fetch the current mtrr settings from the current CPU (assumed to all |
| 152 | * be in sync in the SMP case). Note that if we are here, we assume |
| 153 | * that MTRRs are enabled, and we may or may not have fixed MTRRs. |
| 154 | */ |
| 155 | static void |
| 156 | i686_mrfetch(struct mem_range_softc *sc) |
| 157 | { |
| 158 | struct mem_range_desc *mrd; |
| 159 | u_int64_t msrv; |
| 160 | int i, j, msr; |
| 161 | |
| 162 | mrd = sc->mr_desc; |
| 163 | |
| 164 | /* Get fixed-range MTRRs */ |
| 165 | if (sc->mr_cap & MR686_FIXMTRR) { |
| 166 | msr = MSR_MTRR64kBase; |
| 167 | for (i = 0; i < (MTRR_N64K / 8); i++, msr++) { |
| 168 | msrv = rdmsr(msr); |
| 169 | for (j = 0; j < 8; j++, mrd++) { |
| 170 | mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) | |
| 171 | i686_mtrr2mrt(msrv & 0xff) | |
| 172 | MDF_ACTIVE; |
| 173 | if (mrd->mr_owner[0] == 0) |
| 174 | strcpy(mrd->mr_owner, mem_owner_bios); |
| 175 | msrv = msrv >> 8; |
| 176 | } |
| 177 | } |
| 178 | msr = MSR_MTRR16kBase; |
| 179 | for (i = 0; i < (MTRR_N16K / 8); i++, msr++) { |
| 180 | msrv = rdmsr(msr); |
| 181 | for (j = 0; j < 8; j++, mrd++) { |
| 182 | mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) | |
| 183 | i686_mtrr2mrt(msrv & 0xff) | |
| 184 | MDF_ACTIVE; |
| 185 | if (mrd->mr_owner[0] == 0) |
| 186 | strcpy(mrd->mr_owner, mem_owner_bios); |
| 187 | msrv = msrv >> 8; |
| 188 | } |
| 189 | } |
| 190 | msr = MSR_MTRR4kBase; |
| 191 | for (i = 0; i < (MTRR_N4K / 8); i++, msr++) { |
| 192 | msrv = rdmsr(msr); |
| 193 | for (j = 0; j < 8; j++, mrd++) { |
| 194 | mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) | |
| 195 | i686_mtrr2mrt(msrv & 0xff) | |
| 196 | MDF_ACTIVE; |
| 197 | if (mrd->mr_owner[0] == 0) |
| 198 | strcpy(mrd->mr_owner, mem_owner_bios); |
| 199 | msrv = msrv >> 8; |
| 200 | } |
| 201 | } |
| 202 | } |
| 203 | |
| 204 | /* Get remainder which must be variable MTRRs */ |
| 205 | msr = MSR_MTRRVarBase; |
| 206 | for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) { |
| 207 | msrv = rdmsr(msr); |
| 208 | mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) | |
| 209 | i686_mtrr2mrt(msrv & 0xff); |
| 210 | mrd->mr_base = msrv & 0x0000000ffffff000LL; |
| 211 | msrv = rdmsr(msr + 1); |
| 212 | mrd->mr_flags = (msrv & 0x800) ? |
| 213 | (mrd->mr_flags | MDF_ACTIVE) : |
| 214 | (mrd->mr_flags & ~MDF_ACTIVE); |
| 215 | /* Compute the range from the mask. Ick. */ |
| 216 | mrd->mr_len = (~(msrv & 0x0000000ffffff000LL) & 0x0000000fffffffffLL) + 1; |
| 217 | if (!mrvalid(mrd->mr_base, mrd->mr_len)) |
| 218 | mrd->mr_flags |= MDF_BOGUS; |
| 219 | /* If unclaimed and active, must be the BIOS */ |
| 220 | if ((mrd->mr_flags & MDF_ACTIVE) && (mrd->mr_owner[0] == 0)) |
| 221 | strcpy(mrd->mr_owner, mem_owner_bios); |
| 222 | } |
| 223 | } |
| 224 | |
| 225 | /* |
| 226 | * Return the MTRR memory type matching a region's flags |
| 227 | */ |
| 228 | static int |
| 229 | i686_mtrrtype(int flags) |
| 230 | { |
| 231 | int i; |
| 232 | |
| 233 | flags &= MDF_ATTRMASK; |
| 234 | |
| 235 | for (i = 0; i < MTRRTOMRTLEN; i++) { |
| 236 | if (i686_mtrrtomrt[i] == MDF_UNKNOWN) |
| 237 | continue; |
| 238 | if (flags == i686_mtrrtomrt[i]) |
| 239 | return(i); |
| 240 | } |
| 241 | return(-1); |
| 242 | } |
| 243 | |
/*
 * Convert memory-range flags to an MTRR type byte, falling back to the
 * current hardware value (oldval) when the flags have no MTRR encoding.
 */
static int
i686_mrt2mtrr(int flags, int oldval)
{
    int mtype = i686_mtrrtype(flags);

    return((mtype == -1 ? oldval : mtype) & 0xff);
}
| 253 | |
| 254 | /* |
| 255 | * Update running CPU(s) MTRRs to match the ranges in the descriptor |
| 256 | * list. |
| 257 | * |
| 258 | * XXX Must be called with interrupts enabled. |
| 259 | */ |
| 260 | static void |
| 261 | i686_mrstore(struct mem_range_softc *sc) |
| 262 | { |
| 263 | #ifdef SMP |
| 264 | /* |
| 265 | * We should use all_but_self_ipi() to call other CPUs into a |
| 266 | * locking gate, then call a target function to do this work. |
| 267 | * The "proper" solution involves a generalised locking gate |
| 268 | * implementation, not ready yet. |
| 269 | */ |
| 270 | smp_rendezvous(NULL, i686_mrstoreone, NULL, (void *)sc); |
| 271 | #else |
| 272 | mpintr_lock(); /* doesn't have to be mpintr YYY */ |
| 273 | i686_mrstoreone((void *)sc); |
| 274 | mpintr_unlock(); |
| 275 | #endif |
| 276 | } |
| 277 | |
| 278 | /* |
| 279 | * Update the current CPU's MTRRs with those represented in the |
| 280 | * descriptor list. Note that we do this wholesale rather than |
| 281 | * just stuffing one entry; this is simpler (but slower, of course). |
| 282 | */ |
| 283 | static void |
| 284 | i686_mrstoreone(void *arg) |
| 285 | { |
| 286 | struct mem_range_softc *sc = (struct mem_range_softc *)arg; |
| 287 | struct mem_range_desc *mrd; |
| 288 | u_int64_t omsrv, msrv; |
| 289 | int i, j, msr; |
| 290 | u_int cr4save; |
| 291 | |
| 292 | mrd = sc->mr_desc; |
| 293 | |
| 294 | cr4save = rcr4(); /* save cr4 */ |
| 295 | if (cr4save & CR4_PGE) |
| 296 | load_cr4(cr4save & ~CR4_PGE); |
| 297 | load_cr0((rcr0() & ~CR0_NW) | CR0_CD); /* disable caches (CD = 1, NW = 0) */ |
| 298 | wbinvd(); /* flush caches, TLBs */ |
| 299 | wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) & ~0x800); /* disable MTRRs (E = 0) */ |
| 300 | |
| 301 | /* Set fixed-range MTRRs */ |
| 302 | if (sc->mr_cap & MR686_FIXMTRR) { |
| 303 | msr = MSR_MTRR64kBase; |
| 304 | for (i = 0; i < (MTRR_N64K / 8); i++, msr++) { |
| 305 | msrv = 0; |
| 306 | omsrv = rdmsr(msr); |
| 307 | for (j = 7; j >= 0; j--) { |
| 308 | msrv = msrv << 8; |
| 309 | msrv |= i686_mrt2mtrr((mrd + j)->mr_flags, omsrv >> (j*8)); |
| 310 | } |
| 311 | wrmsr(msr, msrv); |
| 312 | mrd += 8; |
| 313 | } |
| 314 | msr = MSR_MTRR16kBase; |
| 315 | for (i = 0; i < (MTRR_N16K / 8); i++, msr++) { |
| 316 | msrv = 0; |
| 317 | omsrv = rdmsr(msr); |
| 318 | for (j = 7; j >= 0; j--) { |
| 319 | msrv = msrv << 8; |
| 320 | msrv |= i686_mrt2mtrr((mrd + j)->mr_flags, omsrv >> (j*8)); |
| 321 | } |
| 322 | wrmsr(msr, msrv); |
| 323 | mrd += 8; |
| 324 | } |
| 325 | msr = MSR_MTRR4kBase; |
| 326 | for (i = 0; i < (MTRR_N4K / 8); i++, msr++) { |
| 327 | msrv = 0; |
| 328 | omsrv = rdmsr(msr); |
| 329 | for (j = 7; j >= 0; j--) { |
| 330 | msrv = msrv << 8; |
| 331 | msrv |= i686_mrt2mtrr((mrd + j)->mr_flags, omsrv >> (j*8)); |
| 332 | } |
| 333 | wrmsr(msr, msrv); |
| 334 | mrd += 8; |
| 335 | } |
| 336 | } |
| 337 | |
| 338 | /* Set remainder which must be variable MTRRs */ |
| 339 | msr = MSR_MTRRVarBase; |
| 340 | for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) { |
| 341 | /* base/type register */ |
| 342 | omsrv = rdmsr(msr); |
| 343 | if (mrd->mr_flags & MDF_ACTIVE) { |
| 344 | msrv = mrd->mr_base & 0x0000000ffffff000LL; |
| 345 | msrv |= i686_mrt2mtrr(mrd->mr_flags, omsrv); |
| 346 | } else { |
| 347 | msrv = 0; |
| 348 | } |
| 349 | wrmsr(msr, msrv); |
| 350 | |
| 351 | /* mask/active register */ |
| 352 | if (mrd->mr_flags & MDF_ACTIVE) { |
| 353 | msrv = 0x800 | (~(mrd->mr_len - 1) & 0x0000000ffffff000LL); |
| 354 | } else { |
| 355 | msrv = 0; |
| 356 | } |
| 357 | wrmsr(msr + 1, msrv); |
| 358 | } |
| 359 | wbinvd(); /* flush caches, TLBs */ |
| 360 | wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) | 0x800); /* restore MTRR state */ |
| 361 | load_cr0(rcr0() & ~(CR0_CD | CR0_NW)); /* enable caches CD = 0 and NW = 0 */ |
| 362 | load_cr4(cr4save); /* restore cr4 */ |
| 363 | } |
| 364 | |
| 365 | /* |
| 366 | * Hunt for the fixed MTRR referencing (addr) |
| 367 | */ |
| 368 | static struct mem_range_desc * |
| 369 | i686_mtrrfixsearch(struct mem_range_softc *sc, u_int64_t addr) |
| 370 | { |
| 371 | struct mem_range_desc *mrd; |
| 372 | int i; |
| 373 | |
| 374 | for (i = 0, mrd = sc->mr_desc; i < (MTRR_N64K + MTRR_N16K + MTRR_N4K); i++, mrd++) |
| 375 | if ((addr >= mrd->mr_base) && (addr < (mrd->mr_base + mrd->mr_len))) |
| 376 | return(mrd); |
| 377 | return(NULL); |
| 378 | } |
| 379 | |
| 380 | /* |
| 381 | * Try to satisfy the given range request by manipulating the fixed MTRRs that |
| 382 | * cover low memory. |
| 383 | * |
| 384 | * Note that we try to be generous here; we'll bloat the range out to the |
| 385 | * next higher/lower boundary to avoid the consumer having to know too much |
| 386 | * about the mechanisms here. |
| 387 | * |
| 388 | * XXX note that this will have to be updated when we start supporting "busy" ranges. |
| 389 | */ |
| 390 | static int |
| 391 | i686_mrsetlow(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg) |
| 392 | { |
| 393 | struct mem_range_desc *first_md, *last_md, *curr_md; |
| 394 | |
| 395 | /* range check */ |
| 396 | if (((first_md = i686_mtrrfixsearch(sc, mrd->mr_base)) == NULL) || |
| 397 | ((last_md = i686_mtrrfixsearch(sc, mrd->mr_base + mrd->mr_len - 1)) == NULL)) |
| 398 | return(EINVAL); |
| 399 | |
| 400 | /* check we aren't doing something risky */ |
| 401 | if (!(mrd->mr_flags & MDF_FORCE)) |
| 402 | for (curr_md = first_md; curr_md <= last_md; curr_md++) { |
| 403 | if ((curr_md->mr_flags & MDF_ATTRMASK) == MDF_UNKNOWN) |
| 404 | return (EACCES); |
| 405 | } |
| 406 | |
| 407 | /* set flags, clear set-by-firmware flag */ |
| 408 | for (curr_md = first_md; curr_md <= last_md; curr_md++) { |
| 409 | curr_md->mr_flags = mrcopyflags(curr_md->mr_flags & ~MDF_FIRMWARE, mrd->mr_flags); |
| 410 | bcopy(mrd->mr_owner, curr_md->mr_owner, sizeof(mrd->mr_owner)); |
| 411 | } |
| 412 | |
| 413 | return(0); |
| 414 | } |
| 415 | |
| 416 | |
| 417 | /* |
| 418 | * Modify/add a variable MTRR to satisfy the request. |
| 419 | * |
| 420 | * XXX needs to be updated to properly support "busy" ranges. |
| 421 | */ |
| 422 | static int |
| 423 | i686_mrsetvariable(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg) |
| 424 | { |
| 425 | struct mem_range_desc *curr_md, *free_md; |
| 426 | int i; |
| 427 | |
| 428 | /* |
| 429 | * Scan the currently active variable descriptors, look for |
| 430 | * one we exactly match (straight takeover) and for possible |
| 431 | * accidental overlaps. |
| 432 | * Keep track of the first empty variable descriptor in case we |
| 433 | * can't perform a takeover. |
| 434 | */ |
| 435 | i = (sc->mr_cap & MR686_FIXMTRR) ? MTRR_N64K + MTRR_N16K + MTRR_N4K : 0; |
| 436 | curr_md = sc->mr_desc + i; |
| 437 | free_md = NULL; |
| 438 | for (; i < sc->mr_ndesc; i++, curr_md++) { |
| 439 | if (curr_md->mr_flags & MDF_ACTIVE) { |
| 440 | /* exact match? */ |
| 441 | if ((curr_md->mr_base == mrd->mr_base) && |
| 442 | (curr_md->mr_len == mrd->mr_len)) { |
| 443 | /* whoops, owned by someone */ |
| 444 | if (curr_md->mr_flags & MDF_BUSY) |
| 445 | return(EBUSY); |
| 446 | /* check we aren't doing something risky */ |
| 447 | if (!(mrd->mr_flags & MDF_FORCE) && |
| 448 | ((curr_md->mr_flags & MDF_ATTRMASK) == MDF_UNKNOWN)) |
| 449 | return (EACCES); |
| 450 | /* Ok, just hijack this entry */ |
| 451 | free_md = curr_md; |
| 452 | break; |
| 453 | } |
| 454 | /* non-exact overlap ? */ |
| 455 | if (mroverlap(curr_md, mrd)) { |
| 456 | /* between conflicting region types? */ |
| 457 | if (i686_mtrrconflict(curr_md->mr_flags, mrd->mr_flags)) |
| 458 | return(EINVAL); |
| 459 | } |
| 460 | } else if (free_md == NULL) { |
| 461 | free_md = curr_md; |
| 462 | } |
| 463 | } |
| 464 | /* got somewhere to put it? */ |
| 465 | if (free_md == NULL) |
| 466 | return(ENOSPC); |
| 467 | |
| 468 | /* Set up new descriptor */ |
| 469 | free_md->mr_base = mrd->mr_base; |
| 470 | free_md->mr_len = mrd->mr_len; |
| 471 | free_md->mr_flags = mrcopyflags(MDF_ACTIVE, mrd->mr_flags); |
| 472 | bcopy(mrd->mr_owner, free_md->mr_owner, sizeof(mrd->mr_owner)); |
| 473 | return(0); |
| 474 | } |
| 475 | |
| 476 | /* |
| 477 | * Handle requests to set memory range attributes by manipulating MTRRs. |
| 478 | * |
| 479 | */ |
| 480 | static int |
| 481 | i686_mrset(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg) |
| 482 | { |
| 483 | struct mem_range_desc *targ; |
| 484 | int error = 0; |
| 485 | |
| 486 | switch(*arg) { |
| 487 | case MEMRANGE_SET_UPDATE: |
| 488 | /* make sure that what's being asked for is even possible at all */ |
| 489 | if (!mrvalid(mrd->mr_base, mrd->mr_len) || |
| 490 | i686_mtrrtype(mrd->mr_flags) == -1) |
| 491 | return(EINVAL); |
| 492 | |
| 493 | #define FIXTOP ((MTRR_N64K * 0x10000) + (MTRR_N16K * 0x4000) + (MTRR_N4K * 0x1000)) |
| 494 | |
| 495 | /* are the "low memory" conditions applicable? */ |
| 496 | if ((sc->mr_cap & MR686_FIXMTRR) && |
| 497 | ((mrd->mr_base + mrd->mr_len) <= FIXTOP)) { |
| 498 | if ((error = i686_mrsetlow(sc, mrd, arg)) != 0) |
| 499 | return(error); |
| 500 | } else { |
| 501 | /* it's time to play with variable MTRRs */ |
| 502 | if ((error = i686_mrsetvariable(sc, mrd, arg)) != 0) |
| 503 | return(error); |
| 504 | } |
| 505 | break; |
| 506 | |
| 507 | case MEMRANGE_SET_REMOVE: |
| 508 | if ((targ = mem_range_match(sc, mrd)) == NULL) |
| 509 | return(ENOENT); |
| 510 | if (targ->mr_flags & MDF_FIXACTIVE) |
| 511 | return(EPERM); |
| 512 | if (targ->mr_flags & MDF_BUSY) |
| 513 | return(EBUSY); |
| 514 | targ->mr_flags &= ~MDF_ACTIVE; |
| 515 | targ->mr_owner[0] = 0; |
| 516 | break; |
| 517 | |
| 518 | default: |
| 519 | return(EOPNOTSUPP); |
| 520 | } |
| 521 | |
| 522 | /* update the hardware */ |
| 523 | i686_mrstore(sc); |
| 524 | i686_mrfetch(sc); /* refetch to see where we're at */ |
| 525 | return(0); |
| 526 | } |
| 527 | |
| 528 | /* |
| 529 | * Work out how many ranges we support, initialise storage for them, |
| 530 | * fetch the initial settings. |
| 531 | */ |
| 532 | static void |
| 533 | i686_mrinit(struct mem_range_softc *sc) |
| 534 | { |
| 535 | struct mem_range_desc *mrd; |
| 536 | int nmdesc = 0; |
| 537 | int i; |
| 538 | |
| 539 | mtrrcap = rdmsr(MSR_MTRRcap); |
| 540 | mtrrdef = rdmsr(MSR_MTRRdefType); |
| 541 | |
| 542 | /* For now, bail out if MTRRs are not enabled */ |
| 543 | if (!(mtrrdef & 0x800)) { |
| 544 | if (bootverbose) |
| 545 | printf("CPU supports MTRRs but not enabled\n"); |
| 546 | return; |
| 547 | } |
| 548 | nmdesc = mtrrcap & 0xff; |
| 549 | printf("Pentium Pro MTRR support enabled\n"); |
| 550 | |
| 551 | /* If fixed MTRRs supported and enabled */ |
| 552 | if ((mtrrcap & 0x100) && (mtrrdef & 0x400)) { |
| 553 | sc->mr_cap = MR686_FIXMTRR; |
| 554 | nmdesc += MTRR_N64K + MTRR_N16K + MTRR_N4K; |
| 555 | } |
| 556 | |
| 557 | sc->mr_desc = |
| 558 | (struct mem_range_desc *)malloc(nmdesc * sizeof(struct mem_range_desc), |
| 559 | M_MEMDESC, M_WAITOK); |
| 560 | bzero(sc->mr_desc, nmdesc * sizeof(struct mem_range_desc)); |
| 561 | sc->mr_ndesc = nmdesc; |
| 562 | |
| 563 | mrd = sc->mr_desc; |
| 564 | |
| 565 | /* Populate the fixed MTRR entries' base/length */ |
| 566 | if (sc->mr_cap & MR686_FIXMTRR) { |
| 567 | for (i = 0; i < MTRR_N64K; i++, mrd++) { |
| 568 | mrd->mr_base = i * 0x10000; |
| 569 | mrd->mr_len = 0x10000; |
| 570 | mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN | MDF_FIXACTIVE; |
| 571 | } |
| 572 | for (i = 0; i < MTRR_N16K; i++, mrd++) { |
| 573 | mrd->mr_base = i * 0x4000 + 0x80000; |
| 574 | mrd->mr_len = 0x4000; |
| 575 | mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN | MDF_FIXACTIVE; |
| 576 | } |
| 577 | for (i = 0; i < MTRR_N4K; i++, mrd++) { |
| 578 | mrd->mr_base = i * 0x1000 + 0xc0000; |
| 579 | mrd->mr_len = 0x1000; |
| 580 | mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN | MDF_FIXACTIVE; |
| 581 | } |
| 582 | } |
| 583 | |
| 584 | /* |
| 585 | * Get current settings, anything set now is considered to have |
| 586 | * been set by the firmware. (XXX has something already played here?) |
| 587 | */ |
| 588 | i686_mrfetch(sc); |
| 589 | mrd = sc->mr_desc; |
| 590 | for (i = 0; i < sc->mr_ndesc; i++, mrd++) { |
| 591 | if (mrd->mr_flags & MDF_ACTIVE) |
| 592 | mrd->mr_flags |= MDF_FIRMWARE; |
| 593 | } |
| 594 | } |
| 595 | |
| 596 | /* |
| 597 | * Initialise MTRRs on an AP after the BSP has run the init code. |
| 598 | */ |
| 599 | static void |
| 600 | i686_mrAPinit(struct mem_range_softc *sc) |
| 601 | { |
| 602 | i686_mrstoreone((void *)sc); /* set MTRRs to match BSP */ |
| 603 | wrmsr(MSR_MTRRdefType, mtrrdef); /* set MTRR behaviour to match BSP */ |
| 604 | } |
| 605 | |
| 606 | static void |
| 607 | i686_mem_drvinit(void *unused) |
| 608 | { |
| 609 | /* Try for i686 MTRRs */ |
| 610 | if ((cpu_feature & CPUID_MTRR) && |
| 611 | ((cpu_id & 0xf00) == 0x600 || (cpu_id & 0xf00) == 0xf00) && |
| 612 | ((strcmp(cpu_vendor, "GenuineIntel") == 0) || |
| 613 | (strcmp(cpu_vendor, "AuthenticAMD") == 0))) { |
| 614 | mem_range_softc.mr_op = &i686_mrops; |
| 615 | } |
| 616 | } |
| 617 | |
| 618 | SYSINIT(i686memdev,SI_SUB_DRIVERS,SI_ORDER_FIRST,i686_mem_drvinit,NULL) |
| 619 | |
| 620 | |