^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * APM X-Gene SoC EDAC (error detection and correction)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (c) 2015, Applied Micro Circuits Corporation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Author: Feng Kan <fkan@apm.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Loc Ho <lho@apm.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/ctype.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/edac.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/mfd/syscon.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/of.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/of_address.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/regmap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include "edac_module.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #define EDAC_MOD_STR "xgene_edac"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) /* Global error configuration status registers (CSR) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #define PCPHPERRINTSTS 0x0000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #define PCPHPERRINTMSK 0x0004
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #define MCU_CTL_ERR_MASK BIT(12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #define IOB_PA_ERR_MASK BIT(11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #define IOB_BA_ERR_MASK BIT(10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #define IOB_XGIC_ERR_MASK BIT(9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #define IOB_RB_ERR_MASK BIT(8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #define L3C_UNCORR_ERR_MASK BIT(5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #define MCU_UNCORR_ERR_MASK BIT(4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #define PMD3_MERR_MASK BIT(3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #define PMD2_MERR_MASK BIT(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #define PMD1_MERR_MASK BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #define PMD0_MERR_MASK BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #define PCPLPERRINTSTS 0x0008
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #define PCPLPERRINTMSK 0x000C
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #define CSW_SWITCH_TRACE_ERR_MASK BIT(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #define L3C_CORR_ERR_MASK BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #define MCU_CORR_ERR_MASK BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #define MEMERRINTSTS 0x0010
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) #define MEMERRINTMSK 0x0014
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) struct xgene_edac {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) struct regmap *csw_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) struct regmap *mcba_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) struct regmap *mcbb_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) struct regmap *efuse_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) struct regmap *rb_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) void __iomem *pcp_csr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) spinlock_t lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) struct dentry *dfs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) struct list_head mcus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) struct list_head pmds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) struct list_head l3s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) struct list_head socs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) struct mutex mc_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) int mc_active_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) int mc_registered_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) static void xgene_edac_pcp_rd(struct xgene_edac *edac, u32 reg, u32 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) *val = readl(edac->pcp_csr + reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) static void xgene_edac_pcp_clrbits(struct xgene_edac *edac, u32 reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) u32 bits_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) spin_lock(&edac->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) val = readl(edac->pcp_csr + reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) val &= ~bits_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) writel(val, edac->pcp_csr + reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) spin_unlock(&edac->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) static void xgene_edac_pcp_setbits(struct xgene_edac *edac, u32 reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) u32 bits_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) spin_lock(&edac->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) val = readl(edac->pcp_csr + reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) val |= bits_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) writel(val, edac->pcp_csr + reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) spin_unlock(&edac->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) /* Memory controller error CSR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) #define MCU_MAX_RANK 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) #define MCU_RANK_STRIDE 0x40
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) #define MCUGECR 0x0110
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) #define MCU_GECR_DEMANDUCINTREN_MASK BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) #define MCU_GECR_BACKUCINTREN_MASK BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) #define MCU_GECR_CINTREN_MASK BIT(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) #define MUC_GECR_MCUADDRERREN_MASK BIT(9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) #define MCUGESR 0x0114
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) #define MCU_GESR_ADDRNOMATCH_ERR_MASK BIT(7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) #define MCU_GESR_ADDRMULTIMATCH_ERR_MASK BIT(6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) #define MCU_GESR_PHYP_ERR_MASK BIT(3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) #define MCUESRR0 0x0314
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) #define MCU_ESRR_MULTUCERR_MASK BIT(3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) #define MCU_ESRR_BACKUCERR_MASK BIT(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) #define MCU_ESRR_DEMANDUCERR_MASK BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) #define MCU_ESRR_CERR_MASK BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) #define MCUESRRA0 0x0318
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) #define MCUEBLRR0 0x031c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) #define MCU_EBLRR_ERRBANK_RD(src) (((src) & 0x00000007) >> 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) #define MCUERCRR0 0x0320
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) #define MCU_ERCRR_ERRROW_RD(src) (((src) & 0xFFFF0000) >> 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) #define MCU_ERCRR_ERRCOL_RD(src) ((src) & 0x00000FFF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) #define MCUSBECNT0 0x0324
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) #define MCU_SBECNT_COUNT(src) ((src) & 0xFFFF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) #define CSW_CSWCR 0x0000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) #define CSW_CSWCR_DUALMCB_MASK BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) #define MCBADDRMR 0x0000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) #define MCBADDRMR_MCU_INTLV_MODE_MASK BIT(3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) #define MCBADDRMR_DUALMCU_MODE_MASK BIT(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) #define MCBADDRMR_MCB_INTLV_MODE_MASK BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) #define MCBADDRMR_ADDRESS_MODE_MASK BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) struct xgene_edac_mc_ctx {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) struct list_head next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) struct mem_ctl_info *mci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) struct xgene_edac *edac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) void __iomem *mcu_csr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) u32 mcu_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) static ssize_t xgene_edac_mc_err_inject_write(struct file *file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) const char __user *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) struct mem_ctl_info *mci = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) struct xgene_edac_mc_ctx *ctx = mci->pvt_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) for (i = 0; i < MCU_MAX_RANK; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) writel(MCU_ESRR_MULTUCERR_MASK | MCU_ESRR_BACKUCERR_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) MCU_ESRR_DEMANDUCERR_MASK | MCU_ESRR_CERR_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) ctx->mcu_csr + MCUESRRA0 + i * MCU_RANK_STRIDE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) static const struct file_operations xgene_edac_mc_debug_inject_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) .open = simple_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) .write = xgene_edac_mc_err_inject_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) .llseek = generic_file_llseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) static void xgene_edac_mc_create_debugfs_node(struct mem_ctl_info *mci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) if (!IS_ENABLED(CONFIG_EDAC_DEBUG))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) if (!mci->debugfs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) edac_debugfs_create_file("inject_ctrl", S_IWUSR, mci->debugfs, mci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) &xgene_edac_mc_debug_inject_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) static void xgene_edac_mc_check(struct mem_ctl_info *mci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) struct xgene_edac_mc_ctx *ctx = mci->pvt_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) unsigned int pcp_hp_stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) unsigned int pcp_lp_stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) u32 rank;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) u32 bank;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) u32 count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) u32 col_row;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) xgene_edac_pcp_rd(ctx->edac, PCPHPERRINTSTS, &pcp_hp_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) xgene_edac_pcp_rd(ctx->edac, PCPLPERRINTSTS, &pcp_lp_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) if (!((MCU_UNCORR_ERR_MASK & pcp_hp_stat) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) (MCU_CTL_ERR_MASK & pcp_hp_stat) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) (MCU_CORR_ERR_MASK & pcp_lp_stat)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) for (rank = 0; rank < MCU_MAX_RANK; rank++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) reg = readl(ctx->mcu_csr + MCUESRR0 + rank * MCU_RANK_STRIDE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) /* Detect uncorrectable memory error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) if (reg & (MCU_ESRR_DEMANDUCERR_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) MCU_ESRR_BACKUCERR_MASK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) /* Detected uncorrectable memory error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) edac_mc_chipset_printk(mci, KERN_ERR, "X-Gene",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) "MCU uncorrectable error at rank %d\n", rank);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) 1, 0, 0, 0, 0, 0, -1, mci->ctl_name, "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) /* Detect correctable memory error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) if (reg & MCU_ESRR_CERR_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) bank = readl(ctx->mcu_csr + MCUEBLRR0 +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) rank * MCU_RANK_STRIDE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) col_row = readl(ctx->mcu_csr + MCUERCRR0 +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) rank * MCU_RANK_STRIDE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) count = readl(ctx->mcu_csr + MCUSBECNT0 +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) rank * MCU_RANK_STRIDE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) edac_mc_chipset_printk(mci, KERN_WARNING, "X-Gene",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) "MCU correctable error at rank %d bank %d column %d row %d count %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) rank, MCU_EBLRR_ERRBANK_RD(bank),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) MCU_ERCRR_ERRCOL_RD(col_row),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) MCU_ERCRR_ERRROW_RD(col_row),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) MCU_SBECNT_COUNT(count));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) 1, 0, 0, 0, 0, 0, -1, mci->ctl_name, "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) /* Clear all error registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) writel(0x0, ctx->mcu_csr + MCUEBLRR0 + rank * MCU_RANK_STRIDE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) writel(0x0, ctx->mcu_csr + MCUERCRR0 + rank * MCU_RANK_STRIDE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) writel(0x0, ctx->mcu_csr + MCUSBECNT0 +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) rank * MCU_RANK_STRIDE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) writel(reg, ctx->mcu_csr + MCUESRR0 + rank * MCU_RANK_STRIDE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) /* Detect memory controller error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) reg = readl(ctx->mcu_csr + MCUGESR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) if (reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) if (reg & MCU_GESR_ADDRNOMATCH_ERR_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) edac_mc_chipset_printk(mci, KERN_WARNING, "X-Gene",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) "MCU address miss-match error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) if (reg & MCU_GESR_ADDRMULTIMATCH_ERR_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) edac_mc_chipset_printk(mci, KERN_WARNING, "X-Gene",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) "MCU address multi-match error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) writel(reg, ctx->mcu_csr + MCUGESR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) static void xgene_edac_mc_irq_ctl(struct mem_ctl_info *mci, bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) struct xgene_edac_mc_ctx *ctx = mci->pvt_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) unsigned int val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) if (edac_op_state != EDAC_OPSTATE_INT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) mutex_lock(&ctx->edac->mc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) * As there is only single bit for enable error and interrupt mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) * we must only enable top level interrupt after all MCUs are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) * registered. Otherwise, if there is an error and the corresponding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) * MCU has not registered, the interrupt will never get cleared. To
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) * determine all MCU have registered, we will keep track of active
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) * MCUs and registered MCUs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) if (enable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) /* Set registered MCU bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) ctx->edac->mc_registered_mask |= 1 << ctx->mcu_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) /* Enable interrupt after all active MCU registered */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) if (ctx->edac->mc_registered_mask ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) ctx->edac->mc_active_mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) /* Enable memory controller top level interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) xgene_edac_pcp_clrbits(ctx->edac, PCPHPERRINTMSK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) MCU_UNCORR_ERR_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) MCU_CTL_ERR_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) xgene_edac_pcp_clrbits(ctx->edac, PCPLPERRINTMSK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) MCU_CORR_ERR_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) /* Enable MCU interrupt and error reporting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) val = readl(ctx->mcu_csr + MCUGECR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) val |= MCU_GECR_DEMANDUCINTREN_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) MCU_GECR_BACKUCINTREN_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) MCU_GECR_CINTREN_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) MUC_GECR_MCUADDRERREN_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) writel(val, ctx->mcu_csr + MCUGECR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) /* Disable MCU interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) val = readl(ctx->mcu_csr + MCUGECR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) val &= ~(MCU_GECR_DEMANDUCINTREN_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) MCU_GECR_BACKUCINTREN_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) MCU_GECR_CINTREN_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) MUC_GECR_MCUADDRERREN_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) writel(val, ctx->mcu_csr + MCUGECR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) /* Disable memory controller top level interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) xgene_edac_pcp_setbits(ctx->edac, PCPHPERRINTMSK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) MCU_UNCORR_ERR_MASK | MCU_CTL_ERR_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) xgene_edac_pcp_setbits(ctx->edac, PCPLPERRINTMSK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) MCU_CORR_ERR_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) /* Clear registered MCU bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) ctx->edac->mc_registered_mask &= ~(1 << ctx->mcu_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) mutex_unlock(&ctx->edac->mc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) static int xgene_edac_mc_is_active(struct xgene_edac_mc_ctx *ctx, int mc_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) unsigned int reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) u32 mcu_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) if (regmap_read(ctx->edac->csw_map, CSW_CSWCR, ®))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) if (reg & CSW_CSWCR_DUALMCB_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) * Dual MCB active - Determine if all 4 active or just MCU0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) * and MCU2 active
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) if (regmap_read(ctx->edac->mcbb_map, MCBADDRMR, ®))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) mcu_mask = (reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0xF : 0x5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) * Single MCB active - Determine if MCU0/MCU1 or just MCU0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) * active
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) if (regmap_read(ctx->edac->mcba_map, MCBADDRMR, ®))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) mcu_mask = (reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0x3 : 0x1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) /* Save active MC mask if hasn't set already */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) if (!ctx->edac->mc_active_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) ctx->edac->mc_active_mask = mcu_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) return (mcu_mask & (1 << mc_idx)) ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) static int xgene_edac_mc_add(struct xgene_edac *edac, struct device_node *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) struct mem_ctl_info *mci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) struct edac_mc_layer layers[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) struct xgene_edac_mc_ctx tmp_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) struct xgene_edac_mc_ctx *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) struct resource res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) memset(&tmp_ctx, 0, sizeof(tmp_ctx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) tmp_ctx.edac = edac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) if (!devres_open_group(edac->dev, xgene_edac_mc_add, GFP_KERNEL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) rc = of_address_to_resource(np, 0, &res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) if (rc < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) dev_err(edac->dev, "no MCU resource address\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) goto err_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) tmp_ctx.mcu_csr = devm_ioremap_resource(edac->dev, &res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) if (IS_ERR(tmp_ctx.mcu_csr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) dev_err(edac->dev, "unable to map MCU resource\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) rc = PTR_ERR(tmp_ctx.mcu_csr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) goto err_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) /* Ignore non-active MCU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) if (of_property_read_u32(np, "memory-controller", &tmp_ctx.mcu_id)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) dev_err(edac->dev, "no memory-controller property\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) rc = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) goto err_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) if (!xgene_edac_mc_is_active(&tmp_ctx, tmp_ctx.mcu_id)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) rc = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) goto err_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) layers[0].size = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) layers[0].is_virt_csrow = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) layers[1].type = EDAC_MC_LAYER_CHANNEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) layers[1].size = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) layers[1].is_virt_csrow = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) mci = edac_mc_alloc(tmp_ctx.mcu_id, ARRAY_SIZE(layers), layers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) sizeof(*ctx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) if (!mci) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) goto err_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) ctx = mci->pvt_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) *ctx = tmp_ctx; /* Copy over resource value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) ctx->name = "xgene_edac_mc_err";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) ctx->mci = mci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) mci->pdev = &mci->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) mci->ctl_name = ctx->name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) mci->dev_name = ctx->name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_RDDR2 | MEM_FLAG_RDDR3 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) MEM_FLAG_DDR | MEM_FLAG_DDR2 | MEM_FLAG_DDR3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) mci->edac_ctl_cap = EDAC_FLAG_SECDED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) mci->edac_cap = EDAC_FLAG_SECDED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) mci->mod_name = EDAC_MOD_STR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) mci->ctl_page_to_phys = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) mci->scrub_cap = SCRUB_FLAG_HW_SRC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) mci->scrub_mode = SCRUB_HW_SRC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) if (edac_op_state == EDAC_OPSTATE_POLL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) mci->edac_check = xgene_edac_mc_check;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) if (edac_mc_add_mc(mci)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) dev_err(edac->dev, "edac_mc_add_mc failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) xgene_edac_mc_create_debugfs_node(mci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) list_add(&ctx->next, &edac->mcus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) xgene_edac_mc_irq_ctl(mci, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) devres_remove_group(edac->dev, xgene_edac_mc_add);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) dev_info(edac->dev, "X-Gene EDAC MC registered\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) err_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) edac_mc_free(mci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) err_group:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) devres_release_group(edac->dev, xgene_edac_mc_add);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) static int xgene_edac_mc_remove(struct xgene_edac_mc_ctx *mcu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) xgene_edac_mc_irq_ctl(mcu->mci, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) edac_mc_del_mc(&mcu->mci->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) edac_mc_free(mcu->mci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) /* CPU L1/L2 error CSR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) #define MAX_CPU_PER_PMD 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) #define CPU_CSR_STRIDE 0x00100000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) #define CPU_L2C_PAGE 0x000D0000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) #define CPU_MEMERR_L2C_PAGE 0x000E0000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) #define CPU_MEMERR_CPU_PAGE 0x000F0000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) #define MEMERR_CPU_ICFECR_PAGE_OFFSET 0x0000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) #define MEMERR_CPU_ICFESR_PAGE_OFFSET 0x0004
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) #define MEMERR_CPU_ICFESR_ERRWAY_RD(src) (((src) & 0xFF000000) >> 24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) #define MEMERR_CPU_ICFESR_ERRINDEX_RD(src) (((src) & 0x003F0000) >> 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) #define MEMERR_CPU_ICFESR_ERRINFO_RD(src) (((src) & 0x0000FF00) >> 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) #define MEMERR_CPU_ICFESR_ERRTYPE_RD(src) (((src) & 0x00000070) >> 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) #define MEMERR_CPU_ICFESR_MULTCERR_MASK BIT(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) #define MEMERR_CPU_ICFESR_CERR_MASK BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) #define MEMERR_CPU_LSUESR_PAGE_OFFSET 0x000c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) #define MEMERR_CPU_LSUESR_ERRWAY_RD(src) (((src) & 0xFF000000) >> 24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) #define MEMERR_CPU_LSUESR_ERRINDEX_RD(src) (((src) & 0x003F0000) >> 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) #define MEMERR_CPU_LSUESR_ERRINFO_RD(src) (((src) & 0x0000FF00) >> 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) #define MEMERR_CPU_LSUESR_ERRTYPE_RD(src) (((src) & 0x00000070) >> 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) #define MEMERR_CPU_LSUESR_MULTCERR_MASK BIT(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) #define MEMERR_CPU_LSUESR_CERR_MASK BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) #define MEMERR_CPU_LSUECR_PAGE_OFFSET 0x0008
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) #define MEMERR_CPU_MMUECR_PAGE_OFFSET 0x0010
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) #define MEMERR_CPU_MMUESR_PAGE_OFFSET 0x0014
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) #define MEMERR_CPU_MMUESR_ERRWAY_RD(src) (((src) & 0xFF000000) >> 24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) #define MEMERR_CPU_MMUESR_ERRINDEX_RD(src) (((src) & 0x007F0000) >> 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) #define MEMERR_CPU_MMUESR_ERRINFO_RD(src) (((src) & 0x0000FF00) >> 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) #define MEMERR_CPU_MMUESR_ERRREQSTR_LSU_MASK BIT(7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) #define MEMERR_CPU_MMUESR_ERRTYPE_RD(src) (((src) & 0x00000070) >> 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) #define MEMERR_CPU_MMUESR_MULTCERR_MASK BIT(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) #define MEMERR_CPU_MMUESR_CERR_MASK BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) #define MEMERR_CPU_ICFESRA_PAGE_OFFSET 0x0804
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) #define MEMERR_CPU_LSUESRA_PAGE_OFFSET 0x080c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) #define MEMERR_CPU_MMUESRA_PAGE_OFFSET 0x0814
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) #define MEMERR_L2C_L2ECR_PAGE_OFFSET 0x0000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) #define MEMERR_L2C_L2ESR_PAGE_OFFSET 0x0004
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) #define MEMERR_L2C_L2ESR_ERRSYN_RD(src) (((src) & 0xFF000000) >> 24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) #define MEMERR_L2C_L2ESR_ERRWAY_RD(src) (((src) & 0x00FC0000) >> 18)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) #define MEMERR_L2C_L2ESR_ERRCPU_RD(src) (((src) & 0x00020000) >> 17)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) #define MEMERR_L2C_L2ESR_ERRGROUP_RD(src) (((src) & 0x0000E000) >> 13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) #define MEMERR_L2C_L2ESR_ERRACTION_RD(src) (((src) & 0x00001C00) >> 10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) #define MEMERR_L2C_L2ESR_ERRTYPE_RD(src) (((src) & 0x00000300) >> 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) #define MEMERR_L2C_L2ESR_MULTUCERR_MASK BIT(3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) #define MEMERR_L2C_L2ESR_MULTICERR_MASK BIT(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) #define MEMERR_L2C_L2ESR_UCERR_MASK BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) #define MEMERR_L2C_L2ESR_ERR_MASK BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) #define MEMERR_L2C_L2EALR_PAGE_OFFSET 0x0008
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) #define CPUX_L2C_L2RTOCR_PAGE_OFFSET 0x0010
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) #define MEMERR_L2C_L2EAHR_PAGE_OFFSET 0x000c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) #define CPUX_L2C_L2RTOSR_PAGE_OFFSET 0x0014
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) #define MEMERR_L2C_L2RTOSR_MULTERR_MASK BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) #define MEMERR_L2C_L2RTOSR_ERR_MASK BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) #define CPUX_L2C_L2RTOALR_PAGE_OFFSET 0x0018
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) #define CPUX_L2C_L2RTOAHR_PAGE_OFFSET 0x001c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) #define MEMERR_L2C_L2ESRA_PAGE_OFFSET 0x0804
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) * Processor Module Domain (PMD) context - Context for a pair of processsors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) * Each PMD consists of 2 CPUs and a shared L2 cache. Each CPU consists of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) * its own L1 cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) struct xgene_edac_pmd_ctx {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) struct list_head next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) struct device ddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) struct xgene_edac *edac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) struct edac_device_ctl_info *edac_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) void __iomem *pmd_csr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) u32 pmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) int version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) static void xgene_edac_pmd_l1_check(struct edac_device_ctl_info *edac_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) int cpu_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) struct xgene_edac_pmd_ctx *ctx = edac_dev->pvt_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) void __iomem *pg_f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) pg_f = ctx->pmd_csr + cpu_idx * CPU_CSR_STRIDE + CPU_MEMERR_CPU_PAGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) val = readl(pg_f + MEMERR_CPU_ICFESR_PAGE_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) if (!val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) goto chk_lsu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) dev_err(edac_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) "CPU%d L1 memory error ICF 0x%08X Way 0x%02X Index 0x%02X Info 0x%02X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) ctx->pmd * MAX_CPU_PER_PMD + cpu_idx, val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) MEMERR_CPU_ICFESR_ERRWAY_RD(val),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) MEMERR_CPU_ICFESR_ERRINDEX_RD(val),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) MEMERR_CPU_ICFESR_ERRINFO_RD(val));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) if (val & MEMERR_CPU_ICFESR_CERR_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) dev_err(edac_dev->dev, "One or more correctable error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) if (val & MEMERR_CPU_ICFESR_MULTCERR_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) dev_err(edac_dev->dev, "Multiple correctable error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) switch (MEMERR_CPU_ICFESR_ERRTYPE_RD(val)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) dev_err(edac_dev->dev, "L1 TLB multiple hit\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) dev_err(edac_dev->dev, "Way select multiple hit\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) dev_err(edac_dev->dev, "Physical tag parity error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) case 5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) dev_err(edac_dev->dev, "L1 data parity error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) case 6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) dev_err(edac_dev->dev, "L1 pre-decode parity error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) /* Clear any HW errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) writel(val, pg_f + MEMERR_CPU_ICFESR_PAGE_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) if (val & (MEMERR_CPU_ICFESR_CERR_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) MEMERR_CPU_ICFESR_MULTCERR_MASK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) chk_lsu:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) val = readl(pg_f + MEMERR_CPU_LSUESR_PAGE_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) if (!val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) goto chk_mmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) dev_err(edac_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) "CPU%d memory error LSU 0x%08X Way 0x%02X Index 0x%02X Info 0x%02X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) ctx->pmd * MAX_CPU_PER_PMD + cpu_idx, val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) MEMERR_CPU_LSUESR_ERRWAY_RD(val),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) MEMERR_CPU_LSUESR_ERRINDEX_RD(val),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) MEMERR_CPU_LSUESR_ERRINFO_RD(val));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) if (val & MEMERR_CPU_LSUESR_CERR_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) dev_err(edac_dev->dev, "One or more correctable error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) if (val & MEMERR_CPU_LSUESR_MULTCERR_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) dev_err(edac_dev->dev, "Multiple correctable error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) switch (MEMERR_CPU_LSUESR_ERRTYPE_RD(val)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) dev_err(edac_dev->dev, "Load tag error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) dev_err(edac_dev->dev, "Load data error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) dev_err(edac_dev->dev, "WSL multihit error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) dev_err(edac_dev->dev, "Store tag error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) dev_err(edac_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) "DTB multihit from load pipeline error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) case 5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) dev_err(edac_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) "DTB multihit from store pipeline error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) /* Clear any HW errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) writel(val, pg_f + MEMERR_CPU_LSUESR_PAGE_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) if (val & (MEMERR_CPU_LSUESR_CERR_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) MEMERR_CPU_LSUESR_MULTCERR_MASK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) chk_mmu:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) val = readl(pg_f + MEMERR_CPU_MMUESR_PAGE_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) if (!val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) dev_err(edac_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) "CPU%d memory error MMU 0x%08X Way 0x%02X Index 0x%02X Info 0x%02X %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) ctx->pmd * MAX_CPU_PER_PMD + cpu_idx, val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) MEMERR_CPU_MMUESR_ERRWAY_RD(val),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) MEMERR_CPU_MMUESR_ERRINDEX_RD(val),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) MEMERR_CPU_MMUESR_ERRINFO_RD(val),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) val & MEMERR_CPU_MMUESR_ERRREQSTR_LSU_MASK ? "LSU" : "ICF");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) if (val & MEMERR_CPU_MMUESR_CERR_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) dev_err(edac_dev->dev, "One or more correctable error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) if (val & MEMERR_CPU_MMUESR_MULTCERR_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) dev_err(edac_dev->dev, "Multiple correctable error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) switch (MEMERR_CPU_MMUESR_ERRTYPE_RD(val)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) dev_err(edac_dev->dev, "Stage 1 UTB hit error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) dev_err(edac_dev->dev, "Stage 1 UTB miss error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) dev_err(edac_dev->dev, "Stage 1 UTB allocate error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) dev_err(edac_dev->dev, "TMO operation single bank error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) dev_err(edac_dev->dev, "Stage 2 UTB error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) case 5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) dev_err(edac_dev->dev, "Stage 2 UTB miss error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) case 6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) dev_err(edac_dev->dev, "Stage 2 UTB allocate error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) case 7:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) dev_err(edac_dev->dev, "TMO operation multiple bank error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) /* Clear any HW errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) writel(val, pg_f + MEMERR_CPU_MMUESR_PAGE_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) static void xgene_edac_pmd_l2_check(struct edac_device_ctl_info *edac_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) struct xgene_edac_pmd_ctx *ctx = edac_dev->pvt_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) void __iomem *pg_d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) void __iomem *pg_e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) u32 val_hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) u32 val_lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) /* Check L2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) pg_e = ctx->pmd_csr + CPU_MEMERR_L2C_PAGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) val = readl(pg_e + MEMERR_L2C_L2ESR_PAGE_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) if (!val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) goto chk_l2c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) val_lo = readl(pg_e + MEMERR_L2C_L2EALR_PAGE_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) val_hi = readl(pg_e + MEMERR_L2C_L2EAHR_PAGE_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) dev_err(edac_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) "PMD%d memory error L2C L2ESR 0x%08X @ 0x%08X.%08X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) ctx->pmd, val, val_hi, val_lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) dev_err(edac_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) "ErrSyndrome 0x%02X ErrWay 0x%02X ErrCpu %d ErrGroup 0x%02X ErrAction 0x%02X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) MEMERR_L2C_L2ESR_ERRSYN_RD(val),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) MEMERR_L2C_L2ESR_ERRWAY_RD(val),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) MEMERR_L2C_L2ESR_ERRCPU_RD(val),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) MEMERR_L2C_L2ESR_ERRGROUP_RD(val),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) MEMERR_L2C_L2ESR_ERRACTION_RD(val));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) if (val & MEMERR_L2C_L2ESR_ERR_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) dev_err(edac_dev->dev, "One or more correctable error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) if (val & MEMERR_L2C_L2ESR_MULTICERR_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) dev_err(edac_dev->dev, "Multiple correctable error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) if (val & MEMERR_L2C_L2ESR_UCERR_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) dev_err(edac_dev->dev, "One or more uncorrectable error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) if (val & MEMERR_L2C_L2ESR_MULTUCERR_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) dev_err(edac_dev->dev, "Multiple uncorrectable error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) switch (MEMERR_L2C_L2ESR_ERRTYPE_RD(val)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) dev_err(edac_dev->dev, "Outbound SDB parity error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) dev_err(edac_dev->dev, "Inbound SDB parity error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) dev_err(edac_dev->dev, "Tag ECC error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) dev_err(edac_dev->dev, "Data ECC error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) /* Clear any HW errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) writel(val, pg_e + MEMERR_L2C_L2ESR_PAGE_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) if (val & (MEMERR_L2C_L2ESR_ERR_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) MEMERR_L2C_L2ESR_MULTICERR_MASK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) if (val & (MEMERR_L2C_L2ESR_UCERR_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) MEMERR_L2C_L2ESR_MULTUCERR_MASK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) chk_l2c:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) /* Check if any memory request timed out on L2 cache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) pg_d = ctx->pmd_csr + CPU_L2C_PAGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) val = readl(pg_d + CPUX_L2C_L2RTOSR_PAGE_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) if (val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) val_lo = readl(pg_d + CPUX_L2C_L2RTOALR_PAGE_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) val_hi = readl(pg_d + CPUX_L2C_L2RTOAHR_PAGE_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) dev_err(edac_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) "PMD%d L2C error L2C RTOSR 0x%08X @ 0x%08X.%08X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) ctx->pmd, val, val_hi, val_lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) writel(val, pg_d + CPUX_L2C_L2RTOSR_PAGE_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) static void xgene_edac_pmd_check(struct edac_device_ctl_info *edac_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) struct xgene_edac_pmd_ctx *ctx = edac_dev->pvt_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) unsigned int pcp_hp_stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) xgene_edac_pcp_rd(ctx->edac, PCPHPERRINTSTS, &pcp_hp_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) if (!((PMD0_MERR_MASK << ctx->pmd) & pcp_hp_stat))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) /* Check CPU L1 error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) for (i = 0; i < MAX_CPU_PER_PMD; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) xgene_edac_pmd_l1_check(edac_dev, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) /* Check CPU L2 error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) xgene_edac_pmd_l2_check(edac_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) static void xgene_edac_pmd_cpu_hw_cfg(struct edac_device_ctl_info *edac_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) struct xgene_edac_pmd_ctx *ctx = edac_dev->pvt_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) void __iomem *pg_f = ctx->pmd_csr + cpu * CPU_CSR_STRIDE +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) CPU_MEMERR_CPU_PAGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) * Enable CPU memory error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) * MEMERR_CPU_ICFESRA, MEMERR_CPU_LSUESRA, and MEMERR_CPU_MMUESRA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) writel(0x00000301, pg_f + MEMERR_CPU_ICFECR_PAGE_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) writel(0x00000301, pg_f + MEMERR_CPU_LSUECR_PAGE_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) writel(0x00000101, pg_f + MEMERR_CPU_MMUECR_PAGE_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) static void xgene_edac_pmd_hw_cfg(struct edac_device_ctl_info *edac_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) struct xgene_edac_pmd_ctx *ctx = edac_dev->pvt_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) void __iomem *pg_d = ctx->pmd_csr + CPU_L2C_PAGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) void __iomem *pg_e = ctx->pmd_csr + CPU_MEMERR_L2C_PAGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) /* Enable PMD memory error - MEMERR_L2C_L2ECR and L2C_L2RTOCR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) writel(0x00000703, pg_e + MEMERR_L2C_L2ECR_PAGE_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) /* Configure L2C HW request time out feature if supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) if (ctx->version > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) writel(0x00000119, pg_d + CPUX_L2C_L2RTOCR_PAGE_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) static void xgene_edac_pmd_hw_ctl(struct edac_device_ctl_info *edac_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) struct xgene_edac_pmd_ctx *ctx = edac_dev->pvt_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) /* Enable PMD error interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) if (edac_dev->op_state == OP_RUNNING_INTERRUPT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) if (enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) xgene_edac_pcp_clrbits(ctx->edac, PCPHPERRINTMSK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) PMD0_MERR_MASK << ctx->pmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) xgene_edac_pcp_setbits(ctx->edac, PCPHPERRINTMSK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) PMD0_MERR_MASK << ctx->pmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) if (enable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) xgene_edac_pmd_hw_cfg(edac_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) /* Two CPUs per a PMD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) for (i = 0; i < MAX_CPU_PER_PMD; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) xgene_edac_pmd_cpu_hw_cfg(edac_dev, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) static ssize_t xgene_edac_pmd_l1_inject_ctrl_write(struct file *file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) const char __user *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) struct edac_device_ctl_info *edac_dev = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) struct xgene_edac_pmd_ctx *ctx = edac_dev->pvt_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) void __iomem *cpux_pg_f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) for (i = 0; i < MAX_CPU_PER_PMD; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) cpux_pg_f = ctx->pmd_csr + i * CPU_CSR_STRIDE +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) CPU_MEMERR_CPU_PAGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) writel(MEMERR_CPU_ICFESR_MULTCERR_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) MEMERR_CPU_ICFESR_CERR_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) cpux_pg_f + MEMERR_CPU_ICFESRA_PAGE_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) writel(MEMERR_CPU_LSUESR_MULTCERR_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) MEMERR_CPU_LSUESR_CERR_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) cpux_pg_f + MEMERR_CPU_LSUESRA_PAGE_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) writel(MEMERR_CPU_MMUESR_MULTCERR_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) MEMERR_CPU_MMUESR_CERR_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) cpux_pg_f + MEMERR_CPU_MMUESRA_PAGE_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) static ssize_t xgene_edac_pmd_l2_inject_ctrl_write(struct file *file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) const char __user *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) struct edac_device_ctl_info *edac_dev = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) struct xgene_edac_pmd_ctx *ctx = edac_dev->pvt_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) void __iomem *pg_e = ctx->pmd_csr + CPU_MEMERR_L2C_PAGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) writel(MEMERR_L2C_L2ESR_MULTUCERR_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) MEMERR_L2C_L2ESR_MULTICERR_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) MEMERR_L2C_L2ESR_UCERR_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) MEMERR_L2C_L2ESR_ERR_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) pg_e + MEMERR_L2C_L2ESRA_PAGE_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) static const struct file_operations xgene_edac_pmd_debug_inject_fops[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) .open = simple_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) .write = xgene_edac_pmd_l1_inject_ctrl_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) .llseek = generic_file_llseek, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) .open = simple_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) .write = xgene_edac_pmd_l2_inject_ctrl_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) .llseek = generic_file_llseek, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) xgene_edac_pmd_create_debugfs_nodes(struct edac_device_ctl_info *edac_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) struct xgene_edac_pmd_ctx *ctx = edac_dev->pvt_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) struct dentry *dbgfs_dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) char name[10];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) if (!IS_ENABLED(CONFIG_EDAC_DEBUG) || !ctx->edac->dfs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) snprintf(name, sizeof(name), "PMD%d", ctx->pmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) dbgfs_dir = edac_debugfs_create_dir_at(name, ctx->edac->dfs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) if (!dbgfs_dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) edac_debugfs_create_file("l1_inject_ctrl", S_IWUSR, dbgfs_dir, edac_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) &xgene_edac_pmd_debug_inject_fops[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) edac_debugfs_create_file("l2_inject_ctrl", S_IWUSR, dbgfs_dir, edac_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) &xgene_edac_pmd_debug_inject_fops[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) static int xgene_edac_pmd_available(u32 efuse, int pmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) return (efuse & (1 << pmd)) ? 0 : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) static int xgene_edac_pmd_add(struct xgene_edac *edac, struct device_node *np,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) int version)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) struct edac_device_ctl_info *edac_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) struct xgene_edac_pmd_ctx *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) struct resource res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) char edac_name[10];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) u32 pmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) if (!devres_open_group(edac->dev, xgene_edac_pmd_add, GFP_KERNEL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) /* Determine if this PMD is disabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) if (of_property_read_u32(np, "pmd-controller", &pmd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) dev_err(edac->dev, "no pmd-controller property\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) rc = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) goto err_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) rc = regmap_read(edac->efuse_map, 0, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) goto err_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) if (!xgene_edac_pmd_available(val, pmd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) rc = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) goto err_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) snprintf(edac_name, sizeof(edac_name), "l2c%d", pmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) edac_dev = edac_device_alloc_ctl_info(sizeof(*ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) edac_name, 1, "l2c", 1, 2, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) 0, edac_device_alloc_index());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) if (!edac_dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) goto err_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) ctx = edac_dev->pvt_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) ctx->name = "xgene_pmd_err";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) ctx->pmd = pmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) ctx->edac = edac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) ctx->edac_dev = edac_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) ctx->ddev = *edac->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) ctx->version = version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) edac_dev->dev = &ctx->ddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) edac_dev->ctl_name = ctx->name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) edac_dev->dev_name = ctx->name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) edac_dev->mod_name = EDAC_MOD_STR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) rc = of_address_to_resource(np, 0, &res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) if (rc < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) dev_err(edac->dev, "no PMD resource address\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) ctx->pmd_csr = devm_ioremap_resource(edac->dev, &res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) if (IS_ERR(ctx->pmd_csr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) dev_err(edac->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) "devm_ioremap_resource failed for PMD resource address\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) rc = PTR_ERR(ctx->pmd_csr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) if (edac_op_state == EDAC_OPSTATE_POLL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) edac_dev->edac_check = xgene_edac_pmd_check;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) xgene_edac_pmd_create_debugfs_nodes(edac_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) rc = edac_device_add_device(edac_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) if (rc > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) dev_err(edac->dev, "edac_device_add_device failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) if (edac_op_state == EDAC_OPSTATE_INT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) edac_dev->op_state = OP_RUNNING_INTERRUPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) list_add(&ctx->next, &edac->pmds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) xgene_edac_pmd_hw_ctl(edac_dev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) devres_remove_group(edac->dev, xgene_edac_pmd_add);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) dev_info(edac->dev, "X-Gene EDAC PMD%d registered\n", ctx->pmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) err_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) edac_device_free_ctl_info(edac_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) err_group:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) devres_release_group(edac->dev, xgene_edac_pmd_add);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) static int xgene_edac_pmd_remove(struct xgene_edac_pmd_ctx *pmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) struct edac_device_ctl_info *edac_dev = pmd->edac_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) xgene_edac_pmd_hw_ctl(edac_dev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) edac_device_del_device(edac_dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) edac_device_free_ctl_info(edac_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) /* L3 Error device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) #define L3C_ESR (0x0A * 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) #define L3C_ESR_DATATAG_MASK BIT(9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) #define L3C_ESR_MULTIHIT_MASK BIT(8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) #define L3C_ESR_UCEVICT_MASK BIT(6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) #define L3C_ESR_MULTIUCERR_MASK BIT(5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) #define L3C_ESR_MULTICERR_MASK BIT(4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) #define L3C_ESR_UCERR_MASK BIT(3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) #define L3C_ESR_CERR_MASK BIT(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) #define L3C_ESR_UCERRINTR_MASK BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) #define L3C_ESR_CERRINTR_MASK BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) #define L3C_ECR (0x0B * 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) #define L3C_ECR_UCINTREN BIT(3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) #define L3C_ECR_CINTREN BIT(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) #define L3C_UCERREN BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) #define L3C_CERREN BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) #define L3C_ELR (0x0C * 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) #define L3C_ELR_ERRSYN(src) ((src & 0xFF800000) >> 23)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) #define L3C_ELR_ERRWAY(src) ((src & 0x007E0000) >> 17)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) #define L3C_ELR_AGENTID(src) ((src & 0x0001E000) >> 13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) #define L3C_ELR_ERRGRP(src) ((src & 0x00000F00) >> 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) #define L3C_ELR_OPTYPE(src) ((src & 0x000000F0) >> 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) #define L3C_ELR_PADDRHIGH(src) (src & 0x0000000F)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) #define L3C_AELR (0x0D * 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) #define L3C_BELR (0x0E * 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) #define L3C_BELR_BANK(src) (src & 0x0000000F)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) struct xgene_edac_dev_ctx {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) struct list_head next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) struct device ddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) struct xgene_edac *edac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) struct edac_device_ctl_info *edac_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) int edac_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) void __iomem *dev_csr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) int version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) * Version 1 of the L3 controller has broken single bit correctable logic for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) * certain error syndromes. Log them as uncorrectable in that case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) static bool xgene_edac_l3_promote_to_uc_err(u32 l3cesr, u32 l3celr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) if (l3cesr & L3C_ESR_DATATAG_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) switch (L3C_ELR_ERRSYN(l3celr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) case 0x13C:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) case 0x0B4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) case 0x007:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) case 0x00D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) case 0x00E:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) case 0x019:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) case 0x01A:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) case 0x01C:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) case 0x04E:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) case 0x041:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) } else if (L3C_ELR_ERRWAY(l3celr) == 9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) static void xgene_edac_l3_check(struct edac_device_ctl_info *edac_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) u32 l3cesr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) u32 l3celr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) u32 l3caelr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) u32 l3cbelr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) l3cesr = readl(ctx->dev_csr + L3C_ESR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) if (!(l3cesr & (L3C_ESR_UCERR_MASK | L3C_ESR_CERR_MASK)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) if (l3cesr & L3C_ESR_UCERR_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) dev_err(edac_dev->dev, "L3C uncorrectable error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) if (l3cesr & L3C_ESR_CERR_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) dev_warn(edac_dev->dev, "L3C correctable error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) l3celr = readl(ctx->dev_csr + L3C_ELR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) l3caelr = readl(ctx->dev_csr + L3C_AELR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) l3cbelr = readl(ctx->dev_csr + L3C_BELR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) if (l3cesr & L3C_ESR_MULTIHIT_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) dev_err(edac_dev->dev, "L3C multiple hit error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) if (l3cesr & L3C_ESR_UCEVICT_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) dev_err(edac_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) "L3C dropped eviction of line with error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) if (l3cesr & L3C_ESR_MULTIUCERR_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) dev_err(edac_dev->dev, "L3C multiple uncorrectable error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) if (l3cesr & L3C_ESR_DATATAG_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) dev_err(edac_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) "L3C data error syndrome 0x%X group 0x%X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) L3C_ELR_ERRSYN(l3celr), L3C_ELR_ERRGRP(l3celr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) dev_err(edac_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) "L3C tag error syndrome 0x%X Way of Tag 0x%X Agent ID 0x%X Operation type 0x%X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) L3C_ELR_ERRSYN(l3celr), L3C_ELR_ERRWAY(l3celr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) L3C_ELR_AGENTID(l3celr), L3C_ELR_OPTYPE(l3celr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) * NOTE: Address [41:38] in L3C_ELR_PADDRHIGH(l3celr).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) * Address [37:6] in l3caelr. Lower 6 bits are zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) dev_err(edac_dev->dev, "L3C error address 0x%08X.%08X bank %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) L3C_ELR_PADDRHIGH(l3celr) << 6 | (l3caelr >> 26),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) (l3caelr & 0x3FFFFFFF) << 6, L3C_BELR_BANK(l3cbelr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) dev_err(edac_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) "L3C error status register value 0x%X\n", l3cesr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) /* Clear L3C error interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) writel(0, ctx->dev_csr + L3C_ESR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) if (ctx->version <= 1 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) xgene_edac_l3_promote_to_uc_err(l3cesr, l3celr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) if (l3cesr & L3C_ESR_CERR_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) if (l3cesr & L3C_ESR_UCERR_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) static void xgene_edac_l3_hw_init(struct edac_device_ctl_info *edac_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) val = readl(ctx->dev_csr + L3C_ECR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) val |= L3C_UCERREN | L3C_CERREN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) /* On disable, we just disable interrupt but keep error enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) if (edac_dev->op_state == OP_RUNNING_INTERRUPT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) if (enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) val |= L3C_ECR_UCINTREN | L3C_ECR_CINTREN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) val &= ~(L3C_ECR_UCINTREN | L3C_ECR_CINTREN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) writel(val, ctx->dev_csr + L3C_ECR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) if (edac_dev->op_state == OP_RUNNING_INTERRUPT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) /* Enable/disable L3 error top level interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) if (enable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) xgene_edac_pcp_clrbits(ctx->edac, PCPHPERRINTMSK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) L3C_UNCORR_ERR_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) xgene_edac_pcp_clrbits(ctx->edac, PCPLPERRINTMSK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) L3C_CORR_ERR_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) xgene_edac_pcp_setbits(ctx->edac, PCPHPERRINTMSK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) L3C_UNCORR_ERR_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) xgene_edac_pcp_setbits(ctx->edac, PCPLPERRINTMSK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) L3C_CORR_ERR_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) static ssize_t xgene_edac_l3_inject_ctrl_write(struct file *file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) const char __user *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) struct edac_device_ctl_info *edac_dev = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) /* Generate all errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) writel(0xFFFFFFFF, ctx->dev_csr + L3C_ESR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) static const struct file_operations xgene_edac_l3_debug_inject_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) .open = simple_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) .write = xgene_edac_l3_inject_ctrl_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) .llseek = generic_file_llseek
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) xgene_edac_l3_create_debugfs_nodes(struct edac_device_ctl_info *edac_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) struct dentry *dbgfs_dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) char name[10];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) if (!IS_ENABLED(CONFIG_EDAC_DEBUG) || !ctx->edac->dfs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) snprintf(name, sizeof(name), "l3c%d", ctx->edac_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) dbgfs_dir = edac_debugfs_create_dir_at(name, ctx->edac->dfs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) if (!dbgfs_dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) debugfs_create_file("l3_inject_ctrl", S_IWUSR, dbgfs_dir, edac_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) &xgene_edac_l3_debug_inject_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) static int xgene_edac_l3_add(struct xgene_edac *edac, struct device_node *np,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) int version)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) struct edac_device_ctl_info *edac_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) struct xgene_edac_dev_ctx *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) struct resource res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) void __iomem *dev_csr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) int edac_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) if (!devres_open_group(edac->dev, xgene_edac_l3_add, GFP_KERNEL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) rc = of_address_to_resource(np, 0, &res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) if (rc < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) dev_err(edac->dev, "no L3 resource address\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) goto err_release_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) dev_csr = devm_ioremap_resource(edac->dev, &res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) if (IS_ERR(dev_csr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) dev_err(edac->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) "devm_ioremap_resource failed for L3 resource address\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) rc = PTR_ERR(dev_csr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) goto err_release_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) edac_idx = edac_device_alloc_index();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) edac_dev = edac_device_alloc_ctl_info(sizeof(*ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) "l3c", 1, "l3c", 1, 0, NULL, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) edac_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) if (!edac_dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) goto err_release_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) ctx = edac_dev->pvt_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) ctx->dev_csr = dev_csr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) ctx->name = "xgene_l3_err";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) ctx->edac_idx = edac_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) ctx->edac = edac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) ctx->edac_dev = edac_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) ctx->ddev = *edac->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) ctx->version = version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) edac_dev->dev = &ctx->ddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) edac_dev->ctl_name = ctx->name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) edac_dev->dev_name = ctx->name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) edac_dev->mod_name = EDAC_MOD_STR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) if (edac_op_state == EDAC_OPSTATE_POLL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) edac_dev->edac_check = xgene_edac_l3_check;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) xgene_edac_l3_create_debugfs_nodes(edac_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) rc = edac_device_add_device(edac_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) if (rc > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) dev_err(edac->dev, "failed edac_device_add_device()\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) goto err_ctl_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) if (edac_op_state == EDAC_OPSTATE_INT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) edac_dev->op_state = OP_RUNNING_INTERRUPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) list_add(&ctx->next, &edac->l3s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) xgene_edac_l3_hw_init(edac_dev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) devres_remove_group(edac->dev, xgene_edac_l3_add);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) dev_info(edac->dev, "X-Gene EDAC L3 registered\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) err_ctl_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) edac_device_free_ctl_info(edac_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) err_release_group:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) devres_release_group(edac->dev, xgene_edac_l3_add);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) static int xgene_edac_l3_remove(struct xgene_edac_dev_ctx *l3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) struct edac_device_ctl_info *edac_dev = l3->edac_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) xgene_edac_l3_hw_init(edac_dev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) edac_device_del_device(l3->edac->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) edac_device_free_ctl_info(edac_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) /* SoC error device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) #define IOBAXIS0TRANSERRINTSTS 0x0000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) #define IOBAXIS0_M_ILLEGAL_ACCESS_MASK BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) #define IOBAXIS0_ILLEGAL_ACCESS_MASK BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) #define IOBAXIS0TRANSERRINTMSK 0x0004
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) #define IOBAXIS0TRANSERRREQINFOL 0x0008
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) #define IOBAXIS0TRANSERRREQINFOH 0x000c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) #define REQTYPE_RD(src) (((src) & BIT(0)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) #define ERRADDRH_RD(src) (((src) & 0xffc00000) >> 22)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) #define IOBAXIS1TRANSERRINTSTS 0x0010
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) #define IOBAXIS1TRANSERRINTMSK 0x0014
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) #define IOBAXIS1TRANSERRREQINFOL 0x0018
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) #define IOBAXIS1TRANSERRREQINFOH 0x001c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) #define IOBPATRANSERRINTSTS 0x0020
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) #define IOBPA_M_REQIDRAM_CORRUPT_MASK BIT(7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) #define IOBPA_REQIDRAM_CORRUPT_MASK BIT(6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) #define IOBPA_M_TRANS_CORRUPT_MASK BIT(5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) #define IOBPA_TRANS_CORRUPT_MASK BIT(4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) #define IOBPA_M_WDATA_CORRUPT_MASK BIT(3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) #define IOBPA_WDATA_CORRUPT_MASK BIT(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) #define IOBPA_M_RDATA_CORRUPT_MASK BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) #define IOBPA_RDATA_CORRUPT_MASK BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) #define IOBBATRANSERRINTSTS 0x0030
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) #define M_ILLEGAL_ACCESS_MASK BIT(15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) #define ILLEGAL_ACCESS_MASK BIT(14)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) #define M_WIDRAM_CORRUPT_MASK BIT(13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) #define WIDRAM_CORRUPT_MASK BIT(12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) #define M_RIDRAM_CORRUPT_MASK BIT(11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) #define RIDRAM_CORRUPT_MASK BIT(10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) #define M_TRANS_CORRUPT_MASK BIT(9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) #define TRANS_CORRUPT_MASK BIT(8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) #define M_WDATA_CORRUPT_MASK BIT(7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) #define WDATA_CORRUPT_MASK BIT(6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) #define M_RBM_POISONED_REQ_MASK BIT(5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) #define RBM_POISONED_REQ_MASK BIT(4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) #define M_XGIC_POISONED_REQ_MASK BIT(3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) #define XGIC_POISONED_REQ_MASK BIT(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) #define M_WRERR_RESP_MASK BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) #define WRERR_RESP_MASK BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) #define IOBBATRANSERRREQINFOL 0x0038
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) #define IOBBATRANSERRREQINFOH 0x003c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) #define REQTYPE_F2_RD(src) ((src) & BIT(0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) #define ERRADDRH_F2_RD(src) (((src) & 0xffc00000) >> 22)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) #define IOBBATRANSERRCSWREQID 0x0040
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) #define XGICTRANSERRINTSTS 0x0050
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) #define M_WR_ACCESS_ERR_MASK BIT(3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) #define WR_ACCESS_ERR_MASK BIT(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) #define M_RD_ACCESS_ERR_MASK BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) #define RD_ACCESS_ERR_MASK BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) #define XGICTRANSERRINTMSK 0x0054
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) #define XGICTRANSERRREQINFO 0x0058
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) #define REQTYPE_MASK BIT(26)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) #define ERRADDR_RD(src) ((src) & 0x03ffffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) #define GLBL_ERR_STS 0x0800
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) #define MDED_ERR_MASK BIT(3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) #define DED_ERR_MASK BIT(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) #define MSEC_ERR_MASK BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) #define SEC_ERR_MASK BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) #define GLBL_SEC_ERRL 0x0810
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) #define GLBL_SEC_ERRH 0x0818
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) #define GLBL_MSEC_ERRL 0x0820
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) #define GLBL_MSEC_ERRH 0x0828
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) #define GLBL_DED_ERRL 0x0830
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) #define GLBL_DED_ERRLMASK 0x0834
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) #define GLBL_DED_ERRH 0x0838
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) #define GLBL_DED_ERRHMASK 0x083c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) #define GLBL_MDED_ERRL 0x0840
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) #define GLBL_MDED_ERRLMASK 0x0844
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) #define GLBL_MDED_ERRH 0x0848
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) #define GLBL_MDED_ERRHMASK 0x084c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) /* IO Bus Registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) #define RBCSR 0x0000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) #define STICKYERR_MASK BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) #define RBEIR 0x0008
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) #define AGENT_OFFLINE_ERR_MASK BIT(30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) #define UNIMPL_RBPAGE_ERR_MASK BIT(29)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) #define WORD_ALIGNED_ERR_MASK BIT(28)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) #define PAGE_ACCESS_ERR_MASK BIT(27)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) #define WRITE_ACCESS_MASK BIT(26)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) static const char * const soc_mem_err_v1[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) "10GbE0",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) "10GbE1",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) "Security",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) "SATA45",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) "SATA23/ETH23",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) "SATA01/ETH01",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) "USB1",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) "USB0",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) "QML",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) "QM0",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) "QM1 (XGbE01)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) "PCIE4",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) "PCIE3",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) "PCIE2",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) "PCIE1",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) "PCIE0",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) "CTX Manager",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) "OCM",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) "1GbE",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) "CLE",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) "AHBC",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) "PktDMA",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) "GFC",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) "MSLIM",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) "10GbE2",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) "10GbE3",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) "QM2 (XGbE23)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) "IOB",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) "unknown",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) "unknown",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) "unknown",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) "unknown",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) static void xgene_edac_iob_gic_report(struct edac_device_ctl_info *edac_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) u32 err_addr_lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) u32 err_addr_hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) u32 info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) /* GIC transaction error interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) reg = readl(ctx->dev_csr + XGICTRANSERRINTSTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) if (!reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) goto chk_iob_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) dev_err(edac_dev->dev, "XGIC transaction error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) if (reg & RD_ACCESS_ERR_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) dev_err(edac_dev->dev, "XGIC read size error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) if (reg & M_RD_ACCESS_ERR_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) dev_err(edac_dev->dev, "Multiple XGIC read size error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) if (reg & WR_ACCESS_ERR_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) dev_err(edac_dev->dev, "XGIC write size error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) if (reg & M_WR_ACCESS_ERR_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) dev_err(edac_dev->dev, "Multiple XGIC write size error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) info = readl(ctx->dev_csr + XGICTRANSERRREQINFO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) dev_err(edac_dev->dev, "XGIC %s access @ 0x%08X (0x%08X)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) info & REQTYPE_MASK ? "read" : "write", ERRADDR_RD(info),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) writel(reg, ctx->dev_csr + XGICTRANSERRINTSTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) chk_iob_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) /* IOB memory error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) reg = readl(ctx->dev_csr + GLBL_ERR_STS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) if (!reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) if (reg & SEC_ERR_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) err_addr_lo = readl(ctx->dev_csr + GLBL_SEC_ERRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) err_addr_hi = readl(ctx->dev_csr + GLBL_SEC_ERRH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) dev_err(edac_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) "IOB single-bit correctable memory at 0x%08X.%08X error\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) err_addr_lo, err_addr_hi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) writel(err_addr_lo, ctx->dev_csr + GLBL_SEC_ERRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) writel(err_addr_hi, ctx->dev_csr + GLBL_SEC_ERRH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) if (reg & MSEC_ERR_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) err_addr_lo = readl(ctx->dev_csr + GLBL_MSEC_ERRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) err_addr_hi = readl(ctx->dev_csr + GLBL_MSEC_ERRH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) dev_err(edac_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) "IOB multiple single-bit correctable memory at 0x%08X.%08X error\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) err_addr_lo, err_addr_hi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) writel(err_addr_lo, ctx->dev_csr + GLBL_MSEC_ERRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) writel(err_addr_hi, ctx->dev_csr + GLBL_MSEC_ERRH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) if (reg & (SEC_ERR_MASK | MSEC_ERR_MASK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) if (reg & DED_ERR_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) err_addr_lo = readl(ctx->dev_csr + GLBL_DED_ERRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) err_addr_hi = readl(ctx->dev_csr + GLBL_DED_ERRH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) dev_err(edac_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) "IOB double-bit uncorrectable memory at 0x%08X.%08X error\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) err_addr_lo, err_addr_hi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) writel(err_addr_lo, ctx->dev_csr + GLBL_DED_ERRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) writel(err_addr_hi, ctx->dev_csr + GLBL_DED_ERRH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) if (reg & MDED_ERR_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) err_addr_lo = readl(ctx->dev_csr + GLBL_MDED_ERRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) err_addr_hi = readl(ctx->dev_csr + GLBL_MDED_ERRH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) dev_err(edac_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) "Multiple IOB double-bit uncorrectable memory at 0x%08X.%08X error\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) err_addr_lo, err_addr_hi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) writel(err_addr_lo, ctx->dev_csr + GLBL_MDED_ERRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) writel(err_addr_hi, ctx->dev_csr + GLBL_MDED_ERRH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) if (reg & (DED_ERR_MASK | MDED_ERR_MASK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) static void xgene_edac_rb_report(struct edac_device_ctl_info *edac_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) u32 err_addr_lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) u32 err_addr_hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) /* If the register bus resource isn't available, just skip it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) if (!ctx->edac->rb_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) goto rb_skip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) * Check RB access errors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) * 1. Out of range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) * 2. Un-implemented page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) * 3. Un-aligned access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) * 4. Offline slave IP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) if (regmap_read(ctx->edac->rb_map, RBCSR, ®))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) if (reg & STICKYERR_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) bool write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) dev_err(edac_dev->dev, "IOB bus access error(s)\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) if (regmap_read(ctx->edac->rb_map, RBEIR, ®))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) write = reg & WRITE_ACCESS_MASK ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) if (reg & AGENT_OFFLINE_ERR_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) dev_err(edac_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) "IOB bus %s access to offline agent error\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) write ? "write" : "read");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) if (reg & UNIMPL_RBPAGE_ERR_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) dev_err(edac_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) "IOB bus %s access to unimplemented page error\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) write ? "write" : "read");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) if (reg & WORD_ALIGNED_ERR_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) dev_err(edac_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) "IOB bus %s word aligned access error\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) write ? "write" : "read");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) if (reg & PAGE_ACCESS_ERR_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) dev_err(edac_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) "IOB bus %s to page out of range access error\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) write ? "write" : "read");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) if (regmap_write(ctx->edac->rb_map, RBEIR, 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) if (regmap_write(ctx->edac->rb_map, RBCSR, 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) rb_skip:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) /* IOB Bridge agent transaction error interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) reg = readl(ctx->dev_csr + IOBBATRANSERRINTSTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) if (!reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) dev_err(edac_dev->dev, "IOB bridge agent (BA) transaction error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) if (reg & WRERR_RESP_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) dev_err(edac_dev->dev, "IOB BA write response error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) if (reg & M_WRERR_RESP_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) dev_err(edac_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) "Multiple IOB BA write response error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) if (reg & XGIC_POISONED_REQ_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) dev_err(edac_dev->dev, "IOB BA XGIC poisoned write error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) if (reg & M_XGIC_POISONED_REQ_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) dev_err(edac_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) "Multiple IOB BA XGIC poisoned write error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) if (reg & RBM_POISONED_REQ_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) dev_err(edac_dev->dev, "IOB BA RBM poisoned write error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) if (reg & M_RBM_POISONED_REQ_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) dev_err(edac_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) "Multiple IOB BA RBM poisoned write error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) if (reg & WDATA_CORRUPT_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) dev_err(edac_dev->dev, "IOB BA write error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) if (reg & M_WDATA_CORRUPT_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) dev_err(edac_dev->dev, "Multiple IOB BA write error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) if (reg & TRANS_CORRUPT_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) dev_err(edac_dev->dev, "IOB BA transaction error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) if (reg & M_TRANS_CORRUPT_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) dev_err(edac_dev->dev, "Multiple IOB BA transaction error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) if (reg & RIDRAM_CORRUPT_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) dev_err(edac_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) "IOB BA RDIDRAM read transaction ID error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) if (reg & M_RIDRAM_CORRUPT_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) dev_err(edac_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) "Multiple IOB BA RDIDRAM read transaction ID error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) if (reg & WIDRAM_CORRUPT_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) dev_err(edac_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) "IOB BA RDIDRAM write transaction ID error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) if (reg & M_WIDRAM_CORRUPT_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) dev_err(edac_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) "Multiple IOB BA RDIDRAM write transaction ID error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) if (reg & ILLEGAL_ACCESS_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) dev_err(edac_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) "IOB BA XGIC/RB illegal access error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) if (reg & M_ILLEGAL_ACCESS_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) dev_err(edac_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) "Multiple IOB BA XGIC/RB illegal access error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) err_addr_lo = readl(ctx->dev_csr + IOBBATRANSERRREQINFOL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) err_addr_hi = readl(ctx->dev_csr + IOBBATRANSERRREQINFOH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) dev_err(edac_dev->dev, "IOB BA %s access at 0x%02X.%08X (0x%08X)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) REQTYPE_F2_RD(err_addr_hi) ? "read" : "write",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) ERRADDRH_F2_RD(err_addr_hi), err_addr_lo, err_addr_hi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) if (reg & WRERR_RESP_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) dev_err(edac_dev->dev, "IOB BA requestor ID 0x%08X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) readl(ctx->dev_csr + IOBBATRANSERRCSWREQID));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) writel(reg, ctx->dev_csr + IOBBATRANSERRINTSTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) static void xgene_edac_pa_report(struct edac_device_ctl_info *edac_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) u32 err_addr_lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) u32 err_addr_hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) /* IOB Processing agent transaction error interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) reg = readl(ctx->dev_csr + IOBPATRANSERRINTSTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) if (!reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) goto chk_iob_axi0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) dev_err(edac_dev->dev, "IOB processing agent (PA) transaction error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) if (reg & IOBPA_RDATA_CORRUPT_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) dev_err(edac_dev->dev, "IOB PA read data RAM error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) if (reg & IOBPA_M_RDATA_CORRUPT_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) dev_err(edac_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) "Multiple IOB PA read data RAM error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) if (reg & IOBPA_WDATA_CORRUPT_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) dev_err(edac_dev->dev, "IOB PA write data RAM error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) if (reg & IOBPA_M_WDATA_CORRUPT_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) dev_err(edac_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) "Multiple IOB PA write data RAM error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) if (reg & IOBPA_TRANS_CORRUPT_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) dev_err(edac_dev->dev, "IOB PA transaction error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) if (reg & IOBPA_M_TRANS_CORRUPT_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) dev_err(edac_dev->dev, "Multiple IOB PA transaction error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) if (reg & IOBPA_REQIDRAM_CORRUPT_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) dev_err(edac_dev->dev, "IOB PA transaction ID RAM error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) if (reg & IOBPA_M_REQIDRAM_CORRUPT_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) dev_err(edac_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) "Multiple IOB PA transaction ID RAM error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) writel(reg, ctx->dev_csr + IOBPATRANSERRINTSTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) chk_iob_axi0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) /* IOB AXI0 Error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) reg = readl(ctx->dev_csr + IOBAXIS0TRANSERRINTSTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) if (!reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) goto chk_iob_axi1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) err_addr_lo = readl(ctx->dev_csr + IOBAXIS0TRANSERRREQINFOL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) err_addr_hi = readl(ctx->dev_csr + IOBAXIS0TRANSERRREQINFOH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) dev_err(edac_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) "%sAXI slave 0 illegal %s access @ 0x%02X.%08X (0x%08X)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) reg & IOBAXIS0_M_ILLEGAL_ACCESS_MASK ? "Multiple " : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) REQTYPE_RD(err_addr_hi) ? "read" : "write",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) ERRADDRH_RD(err_addr_hi), err_addr_lo, err_addr_hi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) writel(reg, ctx->dev_csr + IOBAXIS0TRANSERRINTSTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) chk_iob_axi1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) /* IOB AXI1 Error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) reg = readl(ctx->dev_csr + IOBAXIS1TRANSERRINTSTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) if (!reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) err_addr_lo = readl(ctx->dev_csr + IOBAXIS1TRANSERRREQINFOL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) err_addr_hi = readl(ctx->dev_csr + IOBAXIS1TRANSERRREQINFOH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) dev_err(edac_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) "%sAXI slave 1 illegal %s access @ 0x%02X.%08X (0x%08X)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) reg & IOBAXIS0_M_ILLEGAL_ACCESS_MASK ? "Multiple " : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) REQTYPE_RD(err_addr_hi) ? "read" : "write",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) ERRADDRH_RD(err_addr_hi), err_addr_lo, err_addr_hi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) writel(reg, ctx->dev_csr + IOBAXIS1TRANSERRINTSTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) static void xgene_edac_soc_check(struct edac_device_ctl_info *edac_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) const char * const *soc_mem_err = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) u32 pcp_hp_stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) u32 pcp_lp_stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) xgene_edac_pcp_rd(ctx->edac, PCPHPERRINTSTS, &pcp_hp_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) xgene_edac_pcp_rd(ctx->edac, PCPLPERRINTSTS, &pcp_lp_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) xgene_edac_pcp_rd(ctx->edac, MEMERRINTSTS, ®);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) if (!((pcp_hp_stat & (IOB_PA_ERR_MASK | IOB_BA_ERR_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) IOB_XGIC_ERR_MASK | IOB_RB_ERR_MASK)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) (pcp_lp_stat & CSW_SWITCH_TRACE_ERR_MASK) || reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) if (pcp_hp_stat & IOB_XGIC_ERR_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) xgene_edac_iob_gic_report(edac_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) if (pcp_hp_stat & (IOB_RB_ERR_MASK | IOB_BA_ERR_MASK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) xgene_edac_rb_report(edac_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) if (pcp_hp_stat & IOB_PA_ERR_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) xgene_edac_pa_report(edac_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) if (pcp_lp_stat & CSW_SWITCH_TRACE_ERR_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) dev_info(edac_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) "CSW switch trace correctable memory parity error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) if (!reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) if (ctx->version == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) soc_mem_err = soc_mem_err_v1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) if (!soc_mem_err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) dev_err(edac_dev->dev, "SoC memory parity error 0x%08X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) for (i = 0; i < 31; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) if (reg & (1 << i)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) dev_err(edac_dev->dev, "%s memory parity error\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) soc_mem_err[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) edac_device_handle_ue(edac_dev, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) edac_dev->ctl_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) static void xgene_edac_soc_hw_init(struct edac_device_ctl_info *edac_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) /* Enable SoC IP error interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) if (edac_dev->op_state == OP_RUNNING_INTERRUPT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) if (enable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) xgene_edac_pcp_clrbits(ctx->edac, PCPHPERRINTMSK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) IOB_PA_ERR_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) IOB_BA_ERR_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) IOB_XGIC_ERR_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) IOB_RB_ERR_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) xgene_edac_pcp_clrbits(ctx->edac, PCPLPERRINTMSK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) CSW_SWITCH_TRACE_ERR_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) xgene_edac_pcp_setbits(ctx->edac, PCPHPERRINTMSK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) IOB_PA_ERR_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) IOB_BA_ERR_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) IOB_XGIC_ERR_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) IOB_RB_ERR_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) xgene_edac_pcp_setbits(ctx->edac, PCPLPERRINTMSK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) CSW_SWITCH_TRACE_ERR_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) writel(enable ? 0x0 : 0xFFFFFFFF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) ctx->dev_csr + IOBAXIS0TRANSERRINTMSK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) writel(enable ? 0x0 : 0xFFFFFFFF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) ctx->dev_csr + IOBAXIS1TRANSERRINTMSK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) writel(enable ? 0x0 : 0xFFFFFFFF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) ctx->dev_csr + XGICTRANSERRINTMSK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) xgene_edac_pcp_setbits(ctx->edac, MEMERRINTMSK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) enable ? 0x0 : 0xFFFFFFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) static int xgene_edac_soc_add(struct xgene_edac *edac, struct device_node *np,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) int version)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) struct edac_device_ctl_info *edac_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) struct xgene_edac_dev_ctx *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) void __iomem *dev_csr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) struct resource res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) int edac_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) if (!devres_open_group(edac->dev, xgene_edac_soc_add, GFP_KERNEL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) rc = of_address_to_resource(np, 0, &res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) if (rc < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) dev_err(edac->dev, "no SoC resource address\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) goto err_release_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) dev_csr = devm_ioremap_resource(edac->dev, &res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) if (IS_ERR(dev_csr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) dev_err(edac->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) "devm_ioremap_resource failed for soc resource address\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) rc = PTR_ERR(dev_csr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) goto err_release_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) edac_idx = edac_device_alloc_index();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) edac_dev = edac_device_alloc_ctl_info(sizeof(*ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) "SOC", 1, "SOC", 1, 2, NULL, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) edac_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) if (!edac_dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) goto err_release_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) ctx = edac_dev->pvt_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) ctx->dev_csr = dev_csr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) ctx->name = "xgene_soc_err";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) ctx->edac_idx = edac_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) ctx->edac = edac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) ctx->edac_dev = edac_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) ctx->ddev = *edac->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) ctx->version = version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) edac_dev->dev = &ctx->ddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) edac_dev->ctl_name = ctx->name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) edac_dev->dev_name = ctx->name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) edac_dev->mod_name = EDAC_MOD_STR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) if (edac_op_state == EDAC_OPSTATE_POLL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) edac_dev->edac_check = xgene_edac_soc_check;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) rc = edac_device_add_device(edac_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) if (rc > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) dev_err(edac->dev, "failed edac_device_add_device()\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) goto err_ctl_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) if (edac_op_state == EDAC_OPSTATE_INT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) edac_dev->op_state = OP_RUNNING_INTERRUPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) list_add(&ctx->next, &edac->socs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) xgene_edac_soc_hw_init(edac_dev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) devres_remove_group(edac->dev, xgene_edac_soc_add);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) dev_info(edac->dev, "X-Gene EDAC SoC registered\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) err_ctl_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) edac_device_free_ctl_info(edac_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) err_release_group:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) devres_release_group(edac->dev, xgene_edac_soc_add);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) static int xgene_edac_soc_remove(struct xgene_edac_dev_ctx *soc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) struct edac_device_ctl_info *edac_dev = soc->edac_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) xgene_edac_soc_hw_init(edac_dev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) edac_device_del_device(soc->edac->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) edac_device_free_ctl_info(edac_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810)
/*
 * Top-level PCP error interrupt handler (shared across the three error
 * IRQ lines requested in xgene_edac_probe()).  Reads the high- and
 * low-priority PCP error status registers and dispatches to the
 * registered per-block checkers.
 */
static irqreturn_t xgene_edac_isr(int irq, void *dev_id)
{
	struct xgene_edac *ctx = dev_id;
	struct xgene_edac_pmd_ctx *pmd;
	struct xgene_edac_dev_ctx *node;
	unsigned int pcp_hp_stat;
	unsigned int pcp_lp_stat;

	/* Snapshot both PCP error-interrupt status registers */
	xgene_edac_pcp_rd(ctx, PCPHPERRINTSTS, &pcp_hp_stat);
	xgene_edac_pcp_rd(ctx, PCPLPERRINTSTS, &pcp_lp_stat);
	/* Any MCU error bit set: poll every registered memory controller */
	if ((MCU_UNCORR_ERR_MASK & pcp_hp_stat) ||
	    (MCU_CTL_ERR_MASK & pcp_hp_stat) ||
	    (MCU_CORR_ERR_MASK & pcp_lp_stat)) {
		struct xgene_edac_mc_ctx *mcu;

		list_for_each_entry(mcu, &ctx->mcus, next)
			xgene_edac_mc_check(mcu->mci);
	}

	/* PMD status bits are per-PMD (PMD0 bit shifted by pmd index) */
	list_for_each_entry(pmd, &ctx->pmds, next) {
		if ((PMD0_MERR_MASK << pmd->pmd) & pcp_hp_stat)
			xgene_edac_pmd_check(pmd->edac_dev);
	}

	/* L3 and SoC checkers examine their own status registers */
	list_for_each_entry(node, &ctx->l3s, next)
		xgene_edac_l3_check(node->edac_dev);

	list_for_each_entry(node, &ctx->socs, next)
		xgene_edac_soc_check(node->edac_dev);

	return IRQ_HANDLED;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) static int xgene_edac_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) struct xgene_edac *edac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) struct device_node *child;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) edac = devm_kzalloc(&pdev->dev, sizeof(*edac), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) if (!edac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) edac->dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) platform_set_drvdata(pdev, edac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) INIT_LIST_HEAD(&edac->mcus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) INIT_LIST_HEAD(&edac->pmds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) INIT_LIST_HEAD(&edac->l3s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) INIT_LIST_HEAD(&edac->socs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) spin_lock_init(&edac->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) mutex_init(&edac->mc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) edac->csw_map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) "regmap-csw");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) if (IS_ERR(edac->csw_map)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) dev_err(edac->dev, "unable to get syscon regmap csw\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) rc = PTR_ERR(edac->csw_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) edac->mcba_map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) "regmap-mcba");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) if (IS_ERR(edac->mcba_map)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) dev_err(edac->dev, "unable to get syscon regmap mcba\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) rc = PTR_ERR(edac->mcba_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) edac->mcbb_map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) "regmap-mcbb");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) if (IS_ERR(edac->mcbb_map)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) dev_err(edac->dev, "unable to get syscon regmap mcbb\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) rc = PTR_ERR(edac->mcbb_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) edac->efuse_map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) "regmap-efuse");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) if (IS_ERR(edac->efuse_map)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) dev_err(edac->dev, "unable to get syscon regmap efuse\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) rc = PTR_ERR(edac->efuse_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) * NOTE: The register bus resource is optional for compatibility
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) * reason.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) edac->rb_map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) "regmap-rb");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) if (IS_ERR(edac->rb_map)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) dev_warn(edac->dev, "missing syscon regmap rb\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) edac->rb_map = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) edac->pcp_csr = devm_ioremap_resource(&pdev->dev, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) if (IS_ERR(edac->pcp_csr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) dev_err(&pdev->dev, "no PCP resource address\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) rc = PTR_ERR(edac->pcp_csr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) if (edac_op_state == EDAC_OPSTATE_INT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) int irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) for (i = 0; i < 3; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) irq = platform_get_irq(pdev, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) if (irq < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) dev_err(&pdev->dev, "No IRQ resource\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) rc = irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) rc = devm_request_irq(&pdev->dev, irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) xgene_edac_isr, IRQF_SHARED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) dev_name(&pdev->dev), edac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) "Could not request IRQ %d\n", irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) edac->dfs = edac_debugfs_create_dir(pdev->dev.kobj.name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) for_each_child_of_node(pdev->dev.of_node, child) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) if (!of_device_is_available(child))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) if (of_device_is_compatible(child, "apm,xgene-edac-mc"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) xgene_edac_mc_add(edac, child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) if (of_device_is_compatible(child, "apm,xgene-edac-pmd"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) xgene_edac_pmd_add(edac, child, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) if (of_device_is_compatible(child, "apm,xgene-edac-pmd-v2"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) xgene_edac_pmd_add(edac, child, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) if (of_device_is_compatible(child, "apm,xgene-edac-l3"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) xgene_edac_l3_add(edac, child, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) if (of_device_is_compatible(child, "apm,xgene-edac-l3-v2"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) xgene_edac_l3_add(edac, child, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) if (of_device_is_compatible(child, "apm,xgene-edac-soc"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) xgene_edac_soc_add(edac, child, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) if (of_device_is_compatible(child, "apm,xgene-edac-soc-v1"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) xgene_edac_soc_add(edac, child, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) static int xgene_edac_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) struct xgene_edac *edac = dev_get_drvdata(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) struct xgene_edac_mc_ctx *mcu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) struct xgene_edac_mc_ctx *temp_mcu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) struct xgene_edac_pmd_ctx *pmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) struct xgene_edac_pmd_ctx *temp_pmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) struct xgene_edac_dev_ctx *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) struct xgene_edac_dev_ctx *temp_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) list_for_each_entry_safe(mcu, temp_mcu, &edac->mcus, next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) xgene_edac_mc_remove(mcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) list_for_each_entry_safe(pmd, temp_pmd, &edac->pmds, next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) xgene_edac_pmd_remove(pmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) list_for_each_entry_safe(node, temp_node, &edac->l3s, next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) xgene_edac_l3_remove(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) list_for_each_entry_safe(node, temp_node, &edac->socs, next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) xgene_edac_soc_remove(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987)
/* Matches the top-level node; error sub-blocks are its DT children */
static const struct of_device_id xgene_edac_of_match[] = {
	{ .compatible = "apm,xgene-edac" },
	{},
};
MODULE_DEVICE_TABLE(of, xgene_edac_of_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993)
/* Platform driver glue for the "apm,xgene-edac" device node */
static struct platform_driver xgene_edac_driver = {
	.probe = xgene_edac_probe,
	.remove = xgene_edac_remove,
	.driver = {
		.name = "xgene-edac",
		.of_match_table = xgene_edac_of_match,
	},
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) static int __init xgene_edac_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) /* Make sure error reporting method is sane */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) switch (edac_op_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) case EDAC_OPSTATE_POLL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) case EDAC_OPSTATE_INT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) edac_op_state = EDAC_OPSTATE_INT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) rc = platform_driver_register(&xgene_edac_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) edac_printk(KERN_ERR, EDAC_MOD_STR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) "EDAC fails to register\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) goto reg_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) reg_failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) module_init(xgene_edac_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030)
/* Module exit: unregister the driver; devm frees per-device resources */
static void __exit xgene_edac_exit(void)
{
	platform_driver_unregister(&xgene_edac_driver);
}
module_exit(xgene_edac_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Feng Kan <fkan@apm.com>");
MODULE_DESCRIPTION("APM X-Gene EDAC driver");
/* Read-only parameter; values validated in xgene_edac_init() */
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state,
		 "EDAC error reporting state: 0=Poll, 2=Interrupt");