2 * Copyright (c) 2015 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Sepherosa Ziehau <sepherosa@gmail.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 #include <sys/param.h>
36 #include <sys/systm.h>
38 #include <sys/kernel.h>
39 #include <sys/malloc.h>
40 #include <sys/bitops.h>
42 #include <bus/pci/pcivar.h>
43 #include <bus/pci/pcireg.h>
44 #include <bus/pci/pcibus.h>
45 #include <bus/pci/pci_cfgreg.h>
46 #include <bus/pci/pcib_private.h>
50 #include <dev/misc/ecc/e5_imc_reg.h>
51 #include <dev/misc/ecc/e5_imc_var.h>
/*
 * NOTE(review): this listing is a partial extraction -- each line carries a
 * fused original line number and several lines (including the enclosing
 * struct declarations for ecc_e5_rank and ecc_e5_softc) are missing.
 * Code is left byte-identical; only comments are added.
 */
/* Per-rank bookkeeping: maps a flat channel-wide rank index back to its DIMM. */
54 int rank_dimm; /* owner dimm */
55 int rank_dimm_rank; /* rank within the owner dimm */
/* Softc fields: matched IMC channel descriptor, rank map, and poll timer. */
60 const struct e5_imc_chan *ecc_chan;
63 struct ecc_e5_rank ecc_rank[PCI_E5_IMC_ERROR_RANK_MAX];
64 struct callout ecc_callout;
/*
 * Device-prefixed printf; the GNU ", ##arg" form drops the trailing comma
 * when no varargs are supplied.
 */
67 #define ecc_printf(sc, fmt, arg...) \
68 device_printf((sc)->ecc_dev, fmt , ##arg)
/* Newbus device method implementations (bodies below). */
70 static int ecc_e5_probe(device_t);
71 static int ecc_e5_attach(device_t);
72 static int ecc_e5_detach(device_t);
73 static void ecc_e5_shutdown(device_t);
/* Periodic corrected-error poll, (re)armed via ecc_callout. */
75 static void ecc_e5_callout(void *);
/*
 * Build one e5_imc_chan table entry from the CPU version (v), IMC index,
 * and channel numbers via token pasting against the e5_imc_reg.h IDs.
 * NOTE(review): the brace lines of this macro are missing from this
 * extraction; the trailing backslash on the last visible line shows the
 * definition continues.
 */
77 #define ECC_E5_CHAN(v, imc, c, c_ext) \
79 .did = PCI_E5V##v##_IMC##imc##_ERROR_CHN##c##_DID_ID, \
80 .slot = PCISLOT_E5V##v##_IMC##imc##_ERROR_CHN##c, \
81 .func = PCIFUNC_E5V##v##_IMC##imc##_ERROR_CHN##c, \
82 .desc = "Intel E5 v" #v " ECC", \
84 E5_IMC_CHAN_FIELDS(v, imc, c, c_ext) \
87 #define ECC_E5_CHAN_V2(c) ECC_E5_CHAN(2, 0, c, c)
88 #define ECC_E5_CHAN_END E5_IMC_CHAN_END
/*
 * Table of recognized IMC error channels, walked by ecc_e5_probe().
 * NOTE(review): the ECC_E5_CHAN_V2/.../ECC_E5_CHAN_END entry lines and the
 * closing brace are not visible in this extraction; the #undefs below imply
 * the table is NULL/END-terminated (probe iterates until c->desc == NULL).
 */
90 static const struct e5_imc_chan ecc_e5_chans[] = {
99 #undef ECC_E5_CHAN_END
100 #undef ECC_E5_CHAN_V2
/*
 * Newbus method dispatch table.  NOTE(review): the DEVMETHOD_END
 * terminator and closing brace are not visible in this extraction.
 */
103 static device_method_t ecc_e5_methods[] = {
104 /* Device interface */
105 DEVMETHOD(device_probe, ecc_e5_probe),
106 DEVMETHOD(device_attach, ecc_e5_attach),
107 DEVMETHOD(device_detach, ecc_e5_detach),
108 DEVMETHOD(device_shutdown, ecc_e5_shutdown),
/* Suspend/resume need no device-specific work; use the generic handlers. */
109 DEVMETHOD(device_suspend, bus_generic_suspend),
110 DEVMETHOD(device_resume, bus_generic_resume),
/*
 * Driver declaration.  NOTE(review): the name and methods initializer
 * lines are missing from this view; only the softc size survives.
 */
114 static driver_t ecc_e5_driver = {
117 sizeof(struct ecc_e5_softc)
119 static devclass_t ecc_devclass;
/* Hook the driver onto the PCI bus and record the pci module dependency. */
120 DRIVER_MODULE(ecc_e5, pci, ecc_e5_driver, ecc_devclass, NULL, NULL);
121 MODULE_DEPEND(ecc_e5, pci, 1, 1, 1);
/*
 * ecc_e5_probe - match this PCI function against the known IMC error
 * channel table and, on a hit, cache the channel/node in the softc and set
 * the device description.
 * NOTE(review): the return-type line, return statements, and closing
 * braces are missing from this extraction; presumably the function returns
 * ENXIO on vendor/table mismatch -- confirm against the original file.
 */
124 ecc_e5_probe(device_t dev)
126 const struct e5_imc_chan *c;
/* Reject anything that is not an Intel IMC function up front. */
130 vid = pci_get_vendor(dev);
131 if (vid != PCI_E5_IMC_VID_ID)
134 did = pci_get_device(dev);
135 slot = pci_get_slot(dev);
136 func = pci_get_function(dev);
/* Walk the channel table; desc == NULL marks the end sentinel. */
138 for (c = ecc_e5_chans; c->desc != NULL; ++c) {
139 if (c->did == did && c->slot == slot && c->func == func) {
140 struct ecc_e5_softc *sc = device_get_softc(dev);
/* Resolve which NUMA node/socket this IMC function belongs to. */
144 node = e5_imc_node_probe(dev, c);
/* Describe the device as e.g. "Intel E5 v2 ECC node0 channel1". */
148 ksnprintf(desc, sizeof(desc), "%s node%d channel%d",
149 c->desc, node, c->chan_ext);
150 device_set_desc_copy(dev, desc);
/*
 * ecc_e5_attach - decode the channel's DIMM population from the IMC
 * registers, build the flat rank -> (dimm, rank) map, report corrected
 * error thresholds, and start the 1 Hz polling callout.
 * NOTE(review): this extraction is missing many interior lines (switch
 * openers, breaks, rank_cnt/width/density assignments, returns, braces);
 * code is left byte-identical and comments describe only visible lines.
 */
161 ecc_e5_attach(device_t dev)
163 struct ecc_e5_softc *sc = device_get_softc(dev);
/* MP-safe callout; armed at the end of attach. */
167 callout_init_mp(&sc->ecc_callout);
/* Memory controller target mode register: DDR type, mode, ECC enable. */
170 mcmtr = IMC_CPGC_READ_4(sc->ecc_dev, sc->ecc_chan,
171 PCI_E5_IMC_CPGC_MCMTR);
/* DDR4 flag only exists on v3 (Haswell-EP era) IMCs. */
173 if (sc->ecc_chan->ver == E5_IMC_CHAN_VER3 &&
174 (mcmtr & PCI_E5V3_IMC_CPGC_MCMTR_DDR4))
175 ecc_printf(sc, "DDR4 ");
176 if (__SHIFTOUT(mcmtr, PCI_E5_IMC_CPGC_MCMTR_IMC_MODE) ==
177 PCI_E5_IMC_CPGC_MCMTR_IMC_MODE_DDR3) {
178 ecc_printf(sc, "native %s",
179 sc->ecc_chan->ver == E5_IMC_CHAN_VER2 ?
/* Examine each DIMM slot on this channel. */
186 for (dimm = 0; dimm < PCI_E5_IMC_CHN_DIMM_MAX; ++dimm) {
193 dimmmtr = IMC_CTAD_READ_4(sc->ecc_dev, sc->ecc_chan,
194 PCI_E5_IMC_CTAD_DIMMMTR(dimm));
/* Skip empty slots (DIMM not populated). */
196 if ((dimmmtr & PCI_E5_IMC_CTAD_DIMMMTR_DIMM_POP) == 0)
/*
 * Decode rank count (single/dual/quad, and 8-rank on v3+).
 * NOTE(review): the "switch (val) {", rank_cnt assignments and
 * breaks are missing from this extraction.
 */
199 val = __SHIFTOUT(dimmmtr, PCI_E5_IMC_CTAD_DIMMMTR_RANK_CNT);
201 case PCI_E5_IMC_CTAD_DIMMMTR_RANK_CNT_SR:
204 case PCI_E5_IMC_CTAD_DIMMMTR_RANK_CNT_DR:
207 case PCI_E5_IMC_CTAD_DIMMMTR_RANK_CNT_QR:
/* 8-rank encoding is only valid on v3 or later IMCs. */
210 case PCI_E5V3_IMC_CTAD_DIMMMTR_RANK_CNT_8R:
211 if (sc->ecc_chan->ver >= E5_IMC_CHAN_VER3) {
217 ecc_printf(sc, "unknown rank count 0x%x\n", val);
/* Decode device width (x4/x8/x16); assignments missing from view. */
221 val = __SHIFTOUT(dimmmtr, PCI_E5_IMC_CTAD_DIMMMTR_DDR3_WIDTH);
223 case PCI_E5_IMC_CTAD_DIMMMTR_DDR3_WIDTH_4:
226 case PCI_E5_IMC_CTAD_DIMMMTR_DDR3_WIDTH_8:
229 case PCI_E5_IMC_CTAD_DIMMMTR_DDR3_WIDTH_16:
233 ecc_printf(sc, "unknown ddr3 width 0x%x\n", val);
/* Decode per-rank density; the 1G encoding is pre-v3 only. */
237 val = __SHIFTOUT(dimmmtr, PCI_E5_IMC_CTAD_DIMMMTR_DDR3_DNSTY);
239 case PCI_E5_IMC_CTAD_DIMMMTR_DDR3_DNSTY_2G:
242 case PCI_E5_IMC_CTAD_DIMMMTR_DDR3_DNSTY_4G:
245 case PCI_E5_IMC_CTAD_DIMMMTR_DDR3_DNSTY_8G:
248 case PCI_E5_IMC_CTAD_DIMMMTR_DDR3_DNSTY_1G:
249 if (sc->ecc_chan->ver < E5_IMC_CHAN_VER3) {
255 ecc_printf(sc, "unknown ddr3 density 0x%x\n", val);
/* Per the visible formula, capacity is density * rank_cnt * 2 GB. */
260 ecc_printf(sc, "DIMM%d %dGB, %d%s, density %dGB\n",
261 dimm, density * rank_cnt * 2,
262 rank_cnt, width, density);
/*
 * Record one entry per rank so error-status bit positions (flat
 * rank index) can be mapped back to a DIMM.  "rank" presumably
 * accumulates across DIMMs -- its increment is not visible here.
 */
265 for (r = 0; r < rank_cnt; ++r) {
266 struct ecc_e5_rank *rk;
268 if (rank >= PCI_E5_IMC_ERROR_RANK_MAX) {
269 ecc_printf(sc, "too many ranks\n");
272 rk = &sc->ecc_rank[rank];
274 rk->rank_dimm = dimm;
275 rk->rank_dimm_rank = r;
280 sc->ecc_rank_cnt = rank;
/* Without ECC enabled there is nothing to monitor. */
282 if ((mcmtr & PCI_E5_IMC_CPGC_MCMTR_ECC_EN) == 0) {
283 ecc_printf(sc, "ECC is not enabled\n");
/*
 * Report the corrected-error threshold for every rank.  Each 32-bit
 * threshold register holds two ranks (rank / 2 selects the register;
 * the HI/LO mask choice for odd/even ranks is not visible here).
 */
288 for (rank = 0; rank < sc->ecc_rank_cnt; ++rank) {
289 const struct ecc_e5_rank *rk = &sc->ecc_rank[rank];
293 ofs = PCI_E5_IMC_ERROR_COR_ERR_TH(rank / 2);
295 mask = PCI_E5_IMC_ERROR_COR_ERR_TH_HI;
297 mask = PCI_E5_IMC_ERROR_COR_ERR_TH_LO;
299 thr = pci_read_config(sc->ecc_dev, ofs, 4);
300 ecc_printf(sc, "DIMM%d rank%d, "
301 "corrected error threshold %d\n",
302 rk->rank_dimm, rk->rank_dimm_rank,
303 __SHIFTOUT(thr, mask));
/* Kick off the 1-second polling cycle (see ecc_e5_callout). */
307 callout_reset(&sc->ecc_callout, hz, ecc_e5_callout, sc);
/*
 * ecc_e5_callout - periodic (1 Hz) poll of the corrected-error status
 * register; logs ranks whose corrected-error count crossed the threshold,
 * then acknowledges the status and reschedules itself.
 * NOTE(review): interior lines (braces, the value written to clear the
 * status register) are missing from this extraction.
 */
312 ecc_e5_callout(void *xsc)
314 struct ecc_e5_softc *sc = xsc;
315 uint32_t err_ranks, val;
317 val = pci_read_config(sc->ecc_dev, PCI_E5_IMC_ERROR_COR_ERR_STAT, 4);
/* One status bit per rank; walk the set bits lowest-first via ffs(). */
319 err_ranks = (val & PCI_E5_IMC_ERROR_COR_ERR_STAT_RANKS);
320 while (err_ranks != 0) {
323 rank = ffs(err_ranks) - 1;
324 err_ranks &= ~(1 << rank);
/* Ignore bits beyond the ranks we enumerated at attach time. */
326 if (rank < sc->ecc_rank_cnt) {
327 const struct ecc_e5_rank *rk = &sc->ecc_rank[rank];
/*
 * Two ranks share each 32-bit count register; rank / 2 picks the
 * register, HI/LO mask picks the half (selection not visible here).
 */
331 ofs = PCI_E5_IMC_ERROR_COR_ERR_CNT(rank / 2);
333 mask = PCI_E5_IMC_ERROR_COR_ERR_CNT_HI;
335 mask = PCI_E5_IMC_ERROR_COR_ERR_CNT_LO;
337 err = pci_read_config(sc->ecc_dev, ofs, 4);
338 ecc_printf(sc, "node%d channel%d DIMM%d rank%d, "
339 "too many errors %d",
340 sc->ecc_node, sc->ecc_chan->chan_ext,
341 rk->rank_dimm, rk->rank_dimm_rank,
342 __SHIFTOUT(err, mask));
/* Acknowledge the latched rank bits so future events are reported. */
346 if (val & PCI_E5_IMC_ERROR_COR_ERR_STAT_RANKS) {
347 pci_write_config(sc->ecc_dev, PCI_E5_IMC_ERROR_COR_ERR_STAT,
/* Re-arm for the next 1-second poll. */
350 callout_reset(&sc->ecc_callout, hz, ecc_e5_callout, sc);
/*
 * ecc_e5_stop - cancel the polling callout and wait for any in-flight
 * invocation to finish (callout_stop_sync).  Shared by detach/shutdown.
 * NOTE(review): the bodies of the three functions below are truncated in
 * this extraction; presumably detach and shutdown both call ecc_e5_stop()
 * -- confirm against the original file.
 */
354 ecc_e5_stop(device_t dev)
356 struct ecc_e5_softc *sc = device_get_softc(dev);
358 callout_stop_sync(&sc->ecc_callout);
362 ecc_e5_detach(device_t dev)
369 ecc_e5_shutdown(device_t dev)