// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for Intel(R) 10nm server memory controller.
 * Copyright (c) 2019, Intel Corporation.
 *
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/mce.h>
#include "edac_module.h"
#include "skx_common.h"

#define I10NM_REVISION	"v0.0.3"
#define EDAC_MOD_STR	"i10nm_edac"

/* Debug macros */
#define i10nm_printk(level, fmt, arg...)	\
	edac_printk(level, "i10nm", fmt, ##arg)

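/*
 * Register access helpers. The per-socket and per-iMC MMIO base addresses
 * are read from the URACU PCI device config space: offset 0xd0 holds the
 * socket BAR, offsets 0xd8 + i * 4 hold the per-iMC BARs. DIMMMTR,
 * MCDDRTCFG and MCMTR are then read through the remapped iMC MMIO window,
 * with each channel's register block 0x4000 bytes apart. In the iMC BAR,
 * bits [10:0] give the window offset and bits [23:13] its limit, both in
 * 4KB granularity.
 */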
#define I10NM_GET_SCK_BAR(d, reg)	\
	pci_read_config_dword((d)->uracu, 0xd0, &(reg))
#define I10NM_GET_IMC_BAR(d, i, reg)	\
	pci_read_config_dword((d)->uracu, 0xd8 + (i) * 4, &(reg))
#define I10NM_GET_DIMMMTR(m, i, j)	\
	readl((m)->mbase + 0x2080c + (i) * 0x4000 + (j) * 4)
#define I10NM_GET_MCDDRTCFG(m, i)	\
	readl((m)->mbase + 0x20970 + (i) * 0x4000)
#define I10NM_GET_MCMTR(m, i)		\
	readl((m)->mbase + 0x20ef8 + (i) * 0x4000)

#define I10NM_GET_SCK_MMIO_BASE(reg)	(GET_BITFIELD(reg, 0, 28) << 23)
#define I10NM_GET_IMC_MMIO_OFFSET(reg)	(GET_BITFIELD(reg, 0, 10) << 12)
#define I10NM_GET_IMC_MMIO_SIZE(reg)	((GET_BITFIELD(reg, 13, 23) - \
					 GET_BITFIELD(reg, 0, 10) + 1) << 12)

static struct list_head *i10nm_edac_list;

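/*
 * Look up PCI device dom:bus:dev.fun, enable it and take an extra
 * reference. Returns NULL if the device is not present or cannot be
 * enabled.
 */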
static struct pci_dev *pci_get_dev_wrapper(int dom, unsigned int bus,
					   unsigned int dev, unsigned int fun)
{
	struct pci_dev *pdev;

	pdev = pci_get_domain_bus_and_slot(dom, bus, PCI_DEVFN(dev, fun));
	if (!pdev) {
		edac_dbg(2, "No device %02x:%02x.%x\n",
			 bus, dev, fun);
		return NULL;
	}

	if (unlikely(pci_enable_device(pdev) < 0)) {
		edac_dbg(2, "Failed to enable device %02x:%02x.%x\n",
			 bus, dev, fun);
		return NULL;
	}

	pci_dev_get(pdev);

	return pdev;
}

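/*
 * For each socket on the i10nm_edac_list, pick up the utility and URACU
 * PCI devices, read the socket MMIO base, then find every populated
 * memory controller and ioremap its MMIO register window.
 */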
static int i10nm_get_all_munits(void)
{
	struct pci_dev *mdev;
	void __iomem *mbase;
	unsigned long size;
	struct skx_dev *d;
	int i, j = 0;
	u32 reg, off;
	u64 base;

	list_for_each_entry(d, i10nm_edac_list, list) {
		d->util_all = pci_get_dev_wrapper(d->seg, d->bus[1], 29, 1);
		if (!d->util_all)
			return -ENODEV;

		d->uracu = pci_get_dev_wrapper(d->seg, d->bus[0], 0, 1);
		if (!d->uracu)
			return -ENODEV;

		if (I10NM_GET_SCK_BAR(d, reg)) {
			i10nm_printk(KERN_ERR, "Failed to get socket bar\n");
			return -ENODEV;
		}

		base = I10NM_GET_SCK_MMIO_BASE(reg);
		edac_dbg(2, "socket%d mmio base 0x%llx (reg 0x%x)\n",
			 j++, base, reg);

		for (i = 0; i < I10NM_NUM_IMC; i++) {
			mdev = pci_get_dev_wrapper(d->seg, d->bus[0],
						   12 + i, 0);
			if (i == 0 && !mdev) {
				i10nm_printk(KERN_ERR, "No IMC found\n");
				return -ENODEV;
			}
			if (!mdev)
				continue;

			d->imc[i].mdev = mdev;

			if (I10NM_GET_IMC_BAR(d, i, reg)) {
				i10nm_printk(KERN_ERR, "Failed to get mc bar\n");
				return -ENODEV;
			}

			off = I10NM_GET_IMC_MMIO_OFFSET(reg);
			size = I10NM_GET_IMC_MMIO_SIZE(reg);
			edac_dbg(2, "mc%d mmio base 0x%llx size 0x%lx (reg 0x%x)\n",
				 i, base + off, size, reg);

			mbase = ioremap(base + off, size);
			if (!mbase) {
				i10nm_printk(KERN_ERR, "Failed to ioremap 0x%llx\n",
					     base + off);
				return -ENODEV;
			}

			d->imc[i].mbase = mbase;
		}
	}

	return 0;
}

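/*
 * The two resource configurations differ only in the config-space offset
 * of the bus number register (0xcc vs. 0xd0); i10nm_cpuids below selects
 * the right one per CPU model and stepping.
 */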
static struct res_config i10nm_cfg0 = {
	.type			= I10NM,
	.decs_did		= 0x3452,
	.busno_cfg_offset	= 0xcc,
};

static struct res_config i10nm_cfg1 = {
	.type			= I10NM,
	.decs_did		= 0x3452,
	.busno_cfg_offset	= 0xd0,
};

static const struct x86_cpu_id i10nm_cpuids[] = {
	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ATOM_TREMONT_D, X86_STEPPINGS(0x0, 0x3), &i10nm_cfg0),
	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ATOM_TREMONT_D, X86_STEPPINGS(0x4, 0xf), &i10nm_cfg1),
	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ICELAKE_X, X86_STEPPINGS(0x0, 0x3), &i10nm_cfg0),
	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ICELAKE_X, X86_STEPPINGS(0x4, 0xf), &i10nm_cfg1),
	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ICELAKE_D, X86_STEPPINGS(0x0, 0xf), &i10nm_cfg1),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, i10nm_cpuids);

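/* ECC enable is reported in bit 2 of the channel's MCMTR register. */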
static bool i10nm_check_ecc(struct skx_imc *imc, int chan)
{
	u32 mcmtr;

	mcmtr = I10NM_GET_MCMTR(imc, chan);
	edac_dbg(1, "ch%d mcmtr reg %x\n", chan, mcmtr);

	return !!GET_BITFIELD(mcmtr, 2, 2);
}

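/*
 * Enumerate the DDR and NVDIMM DIMMs behind every channel of this memory
 * controller and fill in the EDAC dimm_info entries. Fail if a populated
 * channel has ECC disabled.
 */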
static int i10nm_get_dimm_config(struct mem_ctl_info *mci)
{
	struct skx_pvt *pvt = mci->pvt_info;
	struct skx_imc *imc = pvt->imc;
	struct dimm_info *dimm;
	u32 mtr, mcddrtcfg;
	int i, j, ndimms;

	for (i = 0; i < I10NM_NUM_CHANNELS; i++) {
		if (!imc->mbase)
			continue;

		ndimms = 0;
		mcddrtcfg = I10NM_GET_MCDDRTCFG(imc, i);
		for (j = 0; j < I10NM_NUM_DIMMS; j++) {
			dimm = edac_get_dimm(mci, i, j, 0);
			mtr = I10NM_GET_DIMMMTR(imc, i, j);
			edac_dbg(1, "dimmmtr 0x%x mcddrtcfg 0x%x (mc%d ch%d dimm%d)\n",
				 mtr, mcddrtcfg, imc->mc, i, j);

			if (IS_DIMM_PRESENT(mtr))
				ndimms += skx_get_dimm_info(mtr, 0, 0, dimm,
							    imc, i, j);
			else if (IS_NVDIMM_PRESENT(mcddrtcfg, j))
				ndimms += skx_get_nvdimm_info(dimm, imc, i, j,
							      EDAC_MOD_STR);
		}
		if (ndimms && !i10nm_check_ecc(imc, i)) {
			i10nm_printk(KERN_ERR, "ECC is disabled on imc %d channel %d\n",
				     imc->mc, i);
			return -ENODEV;
		}
	}

	return 0;
}

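/* Decode machine check errors through skx_common at EDAC priority. */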
static struct notifier_block i10nm_mce_dec = {
	.notifier_call	= skx_mce_check_error,
	.priority	= MCE_PRIO_EDAC,
};

#ifdef CONFIG_EDAC_DEBUG
/*
 * Debug feature.
 * Exercise the address decode logic by writing an address to
 * /sys/kernel/debug/edac/i10nm_test/addr.
 */
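/*
 * Minimal usage sketch (the address value below is only an example and
 * debugfs is assumed to be mounted at /sys/kernel/debug):
 *
 *   echo 0x2090000000 > /sys/kernel/debug/edac/i10nm_test/addr
 *
 * The write is turned into a fake corrected-error MCE record by
 * debugfs_u64_set() and handed to skx_mce_check_error() for decoding.
 */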
static struct dentry *i10nm_test;

static int debugfs_u64_set(void *data, u64 val)
{
	struct mce m;

	pr_warn_once("Fake error to 0x%llx injected via debugfs\n", val);

	memset(&m, 0, sizeof(m));
	/* ADDRV + MemRd + Unknown channel */
	m.status = MCI_STATUS_ADDRV + 0x90;
	/* One corrected error */
	m.status |= BIT_ULL(MCI_STATUS_CEC_SHIFT);
	m.addr = val;
	skx_mce_check_error(NULL, 0, &m);

	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");

static void setup_i10nm_debug(void)
{
	i10nm_test = edac_debugfs_create_dir("i10nm_test");
	if (!i10nm_test)
		return;

	if (!edac_debugfs_create_file("addr", 0200, i10nm_test,
				      NULL, &fops_u64_wo)) {
		debugfs_remove(i10nm_test);
		i10nm_test = NULL;
	}
}

static void teardown_i10nm_debug(void)
{
	debugfs_remove_recursive(i10nm_test);
}
#else
static inline void setup_i10nm_debug(void) {}
static inline void teardown_i10nm_debug(void) {}
#endif /* CONFIG_EDAC_DEBUG */

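/*
 * Module init: bail out on unsupported CPUs or under a hypervisor, read
 * the TOLM/TOHM decode limits, enumerate the per-socket bus mappings and
 * memory controller MMIO windows, register one EDAC MCI per populated
 * iMC, then hook into the MCE decode chain.
 */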
static int __init i10nm_init(void)
{
	u8 mc = 0, src_id = 0, node_id = 0;
	const struct x86_cpu_id *id;
	struct res_config *cfg;
	const char *owner;
	struct skx_dev *d;
	int rc, i, off[3] = {0xd0, 0xc8, 0xcc};
	u64 tolm, tohm;

	edac_dbg(2, "\n");

	owner = edac_get_owner();
	if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
		return -EBUSY;

	if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
		return -ENODEV;

	id = x86_match_cpu(i10nm_cpuids);
	if (!id)
		return -ENODEV;

	cfg = (struct res_config *)id->driver_data;

	rc = skx_get_hi_lo(0x09a2, off, &tolm, &tohm);
	if (rc)
		return rc;

	rc = skx_get_all_bus_mappings(cfg, &i10nm_edac_list);
	if (rc < 0)
		goto fail;
	if (rc == 0) {
		i10nm_printk(KERN_ERR, "No memory controllers found\n");
		return -ENODEV;
	}

	rc = i10nm_get_all_munits();
	if (rc < 0)
		goto fail;

	list_for_each_entry(d, i10nm_edac_list, list) {
		rc = skx_get_src_id(d, 0xf8, &src_id);
		if (rc < 0)
			goto fail;

		rc = skx_get_node_id(d, &node_id);
		if (rc < 0)
			goto fail;

		edac_dbg(2, "src_id = %d node_id = %d\n", src_id, node_id);
		for (i = 0; i < I10NM_NUM_IMC; i++) {
			if (!d->imc[i].mdev)
				continue;

			d->imc[i].mc = mc++;
			d->imc[i].lmc = i;
			d->imc[i].src_id = src_id;
			d->imc[i].node_id = node_id;

			rc = skx_register_mci(&d->imc[i], d->imc[i].mdev,
					      "Intel_10nm Socket", EDAC_MOD_STR,
					      i10nm_get_dimm_config);
			if (rc < 0)
				goto fail;
		}
	}

	rc = skx_adxl_get();
	if (rc)
		goto fail;

	opstate_init();
	mce_register_decode_chain(&i10nm_mce_dec);
	setup_i10nm_debug();

	i10nm_printk(KERN_INFO, "%s\n", I10NM_REVISION);

	return 0;
fail:
	skx_remove();
	return rc;
}

static void __exit i10nm_exit(void)
{
	edac_dbg(2, "\n");
	teardown_i10nm_debug();
	mce_unregister_decode_chain(&i10nm_mce_dec);
	skx_adxl_put();
	skx_remove();
}

module_init(i10nm_init);
module_exit(i10nm_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MC Driver for Intel 10nm server processors");