2 * Copyright (c) 2015 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Sepherosa Ziehau <sepherosa@gmail.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 #include <sys/param.h>
36 #include <sys/systm.h>
38 #include <sys/kernel.h>
39 #include <sys/malloc.h>
40 #include <sys/bitops.h>
42 #include <bus/pci/pcivar.h>
43 #include <bus/pci/pcireg.h>
44 #include <bus/pci/pcibus.h>
45 #include <bus/pci/pci_cfgreg.h>
46 #include <bus/pci/pcib_private.h>
50 #include <dev/misc/ecc/ecc_e5_reg.h>
/*
 * PCI config-space read helpers.
 *
 * NOTE(review): this listing is an excerpt; each line begins with the
 * original file's line number and some lines are elided.
 *
 * Companion functions on the same PCI bus as this device are addressed
 * through the parent bridge with pcib_read_config(), at fixed
 * slot/function numbers.  The _2/_4 suffixes select 16-/32-bit reads.
 */
/* UBOX0: used below to read the CPU node id (PCI_E5_UBOX0_CPUNODEID). */
52 #define UBOX_READ(dev, ofs, w) \
53 pcib_read_config((dev), pci_get_bus((dev)), \
54 PCISLOT_E5_UBOX0, PCIFUNC_E5_UBOX0, (ofs), w)
55 #define UBOX_READ_2(dev, ofs) UBOX_READ((dev), (ofs), 2)
56 #define UBOX_READ_4(dev, ofs) UBOX_READ((dev), (ofs), 4)
/* IMC CPGC: global memory-controller config (e.g. the MCMTR register). */
58 #define IMC_CPGC_READ(dev, ofs, w) \
59 pcib_read_config((dev), pci_get_bus((dev)), \
60 PCISLOT_E5_IMC_CPGC, PCIFUNC_E5_IMC_CPGC, (ofs), w)
61 #define IMC_CPGC_READ_2(dev, ofs) IMC_CPGC_READ((dev), (ofs), 2)
62 #define IMC_CPGC_READ_4(dev, ofs) IMC_CPGC_READ((dev), (ofs), 4)
/* IMC CTAD: per-channel registers (DIMMMTR etc.); 'c' is the channel. */
64 #define IMC_CTAD_READ(dev, c, ofs, w) \
65 pcib_read_config((dev), pci_get_bus((dev)), \
66 PCISLOT_E5_IMC_CTAD, PCIFUNC_E5_IMC_CTAD((c)), (ofs), w)
67 #define IMC_CTAD_READ_2(dev, c, ofs) IMC_CTAD_READ((dev), (c), (ofs), 2)
68 #define IMC_CTAD_READ_4(dev, c, ofs) IMC_CTAD_READ((dev), (c), (ofs), 4)
/*
 * Fields of struct ecc_e5_rank (the struct header is elided from this
 * excerpt): maps a global rank index back to its DIMM, as reported by
 * ecc_e5_attach()/ecc_e5_callout().
 */
79 int rank_dimm; /* owner dimm */
80 int rank_dimm_rank; /* rank within the owner dimm */
/*
 * Softc members (struct ecc_e5_softc, header elided): per-rank map and
 * the periodic polling callout armed at the end of ecc_e5_attach().
 */
88 struct ecc_e5_rank ecc_rank[PCI_E5_IMC_ERROR_RANK_MAX];
89 struct callout ecc_callout;
/* device_printf() wrapper using the softc's device_t as the prefix. */
92 #define ecc_printf(sc, fmt, arg...) \
93 device_printf((sc)->ecc_dev, fmt , ##arg)
/* newbus device-interface methods (wired up in ecc_e5_methods[]). */
95 static int ecc_e5_probe(device_t);
96 static int ecc_e5_attach(device_t);
97 static int ecc_e5_detach(device_t);
98 static void ecc_e5_shutdown(device_t);
/* Periodic correctable-error poll; armed from attach, runs at 1Hz. */
100 static void ecc_e5_callout(void *);
/*
 * ECC_E5_TYPE_V2(c) expands to an ecc_e5_type initializer for IMC error
 * channel 'c' of an E5 v2 CPU; ECC_E5_TYPE_END terminates the table
 * (ecc_e5_probe() stops at .desc == NULL).
 * NOTE(review): the table entries (original lines 114-121) and parts of
 * the macro bodies are elided from this excerpt.
 */
102 #define ECC_E5_TYPE_V2(c) \
104 .did = PCI_E5_IMC_ERROR_CHN##c##_DID_ID, \
105 .slot = PCISLOT_E5_IMC_ERROR, \
106 .func = PCIFUNC_E5_IMC_ERROR_CHN##c, \
108 .desc = "Intel E5 v2 ECC" \
111 #define ECC_E5_TYPE_END { 0, 0, 0, 0, NULL }
113 static const struct ecc_e5_type ecc_types[] = {
122 #undef ECC_E5_TYPE_V2
123 #undef ECC_E5_TYPE_END
/*
 * newbus glue: method table, driver descriptor and module registration.
 * The driver attaches under the pci bus and depends on the pci module.
 * (DEVMETHOD_END and part of the driver_t initializer are elided from
 * this excerpt.)
 */
125 static device_method_t ecc_e5_methods[] = {
126 /* Device interface */
127 DEVMETHOD(device_probe, ecc_e5_probe),
128 DEVMETHOD(device_attach, ecc_e5_attach),
129 DEVMETHOD(device_detach, ecc_e5_detach),
130 DEVMETHOD(device_shutdown, ecc_e5_shutdown),
131 DEVMETHOD(device_suspend, bus_generic_suspend),
132 DEVMETHOD(device_resume, bus_generic_resume),
136 static driver_t ecc_e5_driver = {
139 sizeof(struct ecc_e5_softc)
141 static devclass_t ecc_devclass;
142 DRIVER_MODULE(ecc_e5, pci, ecc_e5_driver, ecc_devclass, NULL, NULL);
143 MODULE_DEPEND(ecc_e5, pci, 1, 1, 1);
/*
 * ecc_e5_probe:
 *
 * Match this PCI function's vendor/device/slot/function against the
 * ecc_types table, then sanity-check the companion IMC CPGC, per-channel
 * CTAD and UBOX functions (vendor/device ids), verify the channel is not
 * disabled and that at least one populated DIMM has an enabled rank.
 * On success, set the device description to "<desc> node%d, channel%d"
 * and record the matched channel in the softc.
 * (Declarations, braces and return statements are partly elided from
 * this excerpt.)
 */
146 ecc_e5_probe(device_t dev)
148 const struct ecc_e5_type *t;
152 vid = pci_get_vendor(dev);
153 if (vid != PCI_E5_VID_ID)
156 did = pci_get_device(dev);
157 slot = pci_get_slot(dev);
158 func = pci_get_function(dev);
/* Table is terminated by a NULL .desc entry (ECC_E5_TYPE_END). */
160 for (t = ecc_types; t->desc != NULL; ++t) {
161 if (t->did == did && t->slot == slot && t->func == func) {
162 struct ecc_e5_softc *sc = device_get_softc(dev);
167 /* Check CPGC vid/did */
168 if (IMC_CPGC_READ_2(dev, PCIR_VENDOR) !=
170 IMC_CPGC_READ_2(dev, PCIR_DEVICE) !=
171 PCI_E5_IMC_CPGC_DID_ID)
174 /* Is this channel disabled */
175 val = IMC_CPGC_READ_4(dev, PCI_E5_IMC_CPGC_MCMTR);
176 if (val & PCI_E5_IMC_CPGC_MCMTR_CHN_DISABLE(t->chan))
179 /* Check CTAD vid/did */
180 if (IMC_CTAD_READ_2(dev, t->chan, PCIR_VENDOR) !=
182 IMC_CTAD_READ_2(dev, t->chan, PCIR_DEVICE) !=
183 PCI_E5_IMC_CTAD_DID_ID(t->chan))
186 /* Are there any DIMMs populated? */
/* A DIMM counts only if populated AND not all of its ranks disabled. */
187 for (dimm = 0; dimm < PCI_E5_IMC_DIMM_MAX; ++dimm) {
188 val = IMC_CTAD_READ_4(dev, t->chan,
189 PCI_E5_IMC_CTAD_DIMMMTR(dimm));
190 if ((val & PCI_E5_IMC_CTAD_DIMMMTR_DIMM_POP) &&
191 (val & PCI_E5_IMC_CTAD_DIMMMTR_RANK_DISABLE_ALL)
192 != PCI_E5_IMC_CTAD_DIMMMTR_RANK_DISABLE_ALL)
/* Loop ran to completion: no usable DIMM on this channel. */
195 if (dimm == PCI_E5_IMC_DIMM_MAX)
198 /* Check UBOX vid/did */
199 if (UBOX_READ_2(dev, PCIR_VENDOR) != PCI_E5_VID_ID ||
200 UBOX_READ_2(dev, PCIR_DEVICE) !=
/* Extract the local node id for the device description. */
204 val = UBOX_READ_4(dev, PCI_E5_UBOX0_CPUNODEID);
205 node = __SHIFTOUT(val,
206 PCI_E5_UBOX0_CPUNODEID_LCLNODEID);
208 ksnprintf(desc, sizeof(desc), "%s node%d, channel%d",
209 t->desc, node, t->chan);
210 device_set_desc_copy(dev, desc);
212 sc->ecc_chan = t->chan;
/*
 * ecc_e5_attach:
 *
 * Log this channel's DRAM configuration (per-DIMM size, rank count,
 * device width, density), build the global-rank -> (dimm, rank) map
 * used by the error poller, bail out if ECC is not enabled in MCMTR,
 * report each rank's corrected-error threshold, and start the 1Hz
 * polling callout.
 * (switch-case bodies, if/else keywords and returns are partly elided
 * from this excerpt.)
 */
221 ecc_e5_attach(device_t dev)
223 struct ecc_e5_softc *sc = device_get_softc(dev);
225 uint32_t dimmmtr[PCI_E5_IMC_DIMM_MAX];
228 callout_init_mp(&sc->ecc_callout);
231 mcmtr = IMC_CPGC_READ_4(sc->ecc_dev, PCI_E5_IMC_CPGC_MCMTR);
233 if (__SHIFTOUT(mcmtr, PCI_E5_IMC_CPGC_MCMTR_IMC_MODE) ==
234 PCI_E5_IMC_CPGC_MCMTR_IMC_MODE_DDR3)
235 ecc_printf(sc, "native DDR3\n");
/* Walk all DIMM slots of this channel. */
239 for (dimm = 0; dimm < PCI_E5_IMC_DIMM_MAX; ++dimm) {
245 dimmmtr[dimm] = IMC_CTAD_READ_4(sc->ecc_dev, sc->ecc_chan,
246 PCI_E5_IMC_CTAD_DIMMMTR(dimm));
/* Skip unpopulated slots. */
248 if ((dimmmtr[dimm] & PCI_E5_IMC_CTAD_DIMMMTR_DIMM_POP) == 0)
/* Decode rank count (single/dual/quad; case bodies elided). */
251 val = __SHIFTOUT(dimmmtr[dimm],
252 PCI_E5_IMC_CTAD_DIMMMTR_RANK_CNT);
254 case PCI_E5_IMC_CTAD_DIMMMTR_RANK_CNT_SR:
257 case PCI_E5_IMC_CTAD_DIMMMTR_RANK_CNT_DR:
260 case PCI_E5_IMC_CTAD_DIMMMTR_RANK_CNT_QR:
264 ecc_printf(sc, "unknown rank count 0x%x\n", val);
/* Decode DDR3 device width (x4/x8/x16). */
268 val = __SHIFTOUT(dimmmtr[dimm],
269 PCI_E5_IMC_CTAD_DIMMMTR_DDR3_WIDTH);
271 case PCI_E5_IMC_CTAD_DIMMMTR_DDR3_WIDTH_4:
274 case PCI_E5_IMC_CTAD_DIMMMTR_DDR3_WIDTH_8:
277 case PCI_E5_IMC_CTAD_DIMMMTR_DDR3_WIDTH_16:
281 ecc_printf(sc, "unknown ddr3 width 0x%x\n", val);
/* Decode DDR3 density. */
285 val = __SHIFTOUT(dimmmtr[dimm],
286 PCI_E5_IMC_CTAD_DIMMMTR_DDR3_DNSTY);
288 case PCI_E5_IMC_CTAD_DIMMMTR_DDR3_DNSTY_1G:
291 case PCI_E5_IMC_CTAD_DIMMMTR_DDR3_DNSTY_2G:
294 case PCI_E5_IMC_CTAD_DIMMMTR_DDR3_DNSTY_4G:
297 case PCI_E5_IMC_CTAD_DIMMMTR_DDR3_DNSTY_8G:
301 ecc_printf(sc, "unknown ddr3 density 0x%x\n", val);
306 ecc_printf(sc, "DIMM%d %dGB, %d%s, density %dGB\n",
307 dimm, density * rank_cnt * 2,
308 rank_cnt, width, density);
/* Record which DIMM/rank-within-DIMM each global rank index maps to. */
311 for (r = 0; r < rank_cnt; ++r) {
312 struct ecc_e5_rank *rk;
314 if (rank >= PCI_E5_IMC_ERROR_RANK_MAX) {
315 ecc_printf(sc, "too many ranks\n");
318 rk = &sc->ecc_rank[rank];
320 rk->rank_dimm = dimm;
321 rk->rank_dimm_rank = r;
326 sc->ecc_rank_cnt = rank;
328 if ((mcmtr & PCI_E5_IMC_CPGC_MCMTR_ECC_EN) == 0) {
329 ecc_printf(sc, "ECC is not enabled\n");
/*
 * Report each rank's corrected-error threshold.  Each 32-bit
 * COR_ERR_TH register holds two ranks' thresholds (rank/2 selects
 * the register, an elided odd/even test selects the half).
 */
334 for (rank = 0; rank < sc->ecc_rank_cnt; ++rank) {
335 const struct ecc_e5_rank *rk = &sc->ecc_rank[rank];
339 ofs = PCI_E5_IMC_ERROR_COR_ERR_TH(rank / 2);
/*
 * BUG(review): both branches of the (elided) odd/even-rank test
 * assign COR_ERR_TH_HI; one of them — presumably the even-rank
 * branch — should assign COR_ERR_TH_LO, mirroring the
 * COR_ERR_CNT_HI/COR_ERR_CNT_LO selection in ecc_e5_callout().
 * As written, half the ranks report the wrong threshold.
 */
341 mask = PCI_E5_IMC_ERROR_COR_ERR_TH_HI;
343 mask = PCI_E5_IMC_ERROR_COR_ERR_TH_HI;
345 thr = pci_read_config(sc->ecc_dev, ofs, 4);
346 ecc_printf(sc, "DIMM%d rank%d, "
347 "corrected error threshold %d\n",
348 rk->rank_dimm, rk->rank_dimm_rank,
349 __SHIFTOUT(thr, mask));
/* Poll for correctable errors once per second. */
353 callout_reset(&sc->ecc_callout, hz, ecc_e5_callout, sc);
/*
 * ecc_e5_callout:
 *
 * Periodic poll.  Read the corrected-error status register, report
 * every rank whose status bit is set, clear the serviced bits and
 * re-arm the callout for one second later.
 */
358 ecc_e5_callout(void *xsc)
360 struct ecc_e5_softc *sc = xsc;
361 uint32_t err_ranks, val;
363 val = pci_read_config(sc->ecc_dev, PCI_E5_IMC_ERROR_COR_ERR_STAT, 4);
365 err_ranks = (val & PCI_E5_IMC_ERROR_COR_ERR_STAT_RANKS);
366 while (err_ranks != 0) {
/* Take the lowest pending rank and remove it from the work set. */
369 rank = ffs(err_ranks) - 1;
370 err_ranks &= ~(1 << rank);
372 if (rank < sc->ecc_rank_cnt) {
373 const struct ecc_e5_rank *rk = &sc->ecc_rank[rank];
/*
 * Each 32-bit COR_ERR_CNT register carries two ranks' counts;
 * the (elided) test picks the HI or LO half — presumably odd
 * ranks -> HI, even -> LO; confirm against the datasheet.
 */
377 ofs = PCI_E5_IMC_ERROR_COR_ERR_CNT(rank / 2);
379 mask = PCI_E5_IMC_ERROR_COR_ERR_CNT_HI;
381 mask = PCI_E5_IMC_ERROR_COR_ERR_CNT_LO;
383 err = pci_read_config(sc->ecc_dev, ofs, 4);
/*
 * NOTE(review): this format string has no trailing "\n",
 * unlike every other ecc_printf() in the file — it is most
 * likely missing (cannot be changed in a comment-only pass).
 */
384 ecc_printf(sc, "node%d channel%d DIMM%d rank%d, "
385 "too many errors %d",
386 sc->ecc_node, sc->ecc_chan,
387 rk->rank_dimm, rk->rank_dimm_rank,
388 __SHIFTOUT(err, mask));
/*
 * Acknowledge serviced ranks by writing the status register back
 * (value written is on an elided line — presumably the observed
 * rank bits; confirm write-semantics against the datasheet).
 */
392 if (val & PCI_E5_IMC_ERROR_COR_ERR_STAT_RANKS) {
393 pci_write_config(sc->ecc_dev, PCI_E5_IMC_ERROR_COR_ERR_STAT,
396 callout_reset(&sc->ecc_callout, hz, ecc_e5_callout, sc);
/*
 * ecc_e5_stop:
 *
 * Cancel the polling callout; callout_stop_sync() also waits for an
 * in-flight invocation to complete before returning.
 */
400 ecc_e5_stop(device_t dev)
402 struct ecc_e5_softc *sc = device_get_softc(dev);
404 callout_stop_sync(&sc->ecc_callout);
/*
 * Device detach.  Body elided from this excerpt — presumably stops the
 * poller via ecc_e5_stop(); verify against the full source.
 */
408 ecc_e5_detach(device_t dev)
/*
 * System shutdown hook.  Body elided from this excerpt — presumably
 * stops the poller via ecc_e5_stop(); verify against the full source.
 */
415 ecc_e5_shutdown(device_t dev)