// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2018, 2019 Cisco Systems
 */

#include <linux/edac.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/stop_machine.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/regmap.h>
#include "edac_module.h"


#define DRV_NAME "aspeed-edac"


#define ASPEED_MCR_PROT        0x00 /* protection key register */
#define ASPEED_MCR_CONF        0x04 /* configuration register */
#define ASPEED_MCR_INTR_CTRL   0x50 /* interrupt control/status register */
#define ASPEED_MCR_ADDR_UNREC  0x58 /* address of first un-recoverable error */
#define ASPEED_MCR_ADDR_REC    0x5c /* address of last recoverable error */
#define ASPEED_MCR_LAST        ASPEED_MCR_ADDR_REC


#define ASPEED_MCR_PROT_PASSWD          0xfc600309
#define ASPEED_MCR_CONF_DRAM_TYPE       BIT(4)
#define ASPEED_MCR_CONF_ECC             BIT(7)
#define ASPEED_MCR_INTR_CTRL_CLEAR      BIT(31)
#define ASPEED_MCR_INTR_CTRL_CNT_REC    GENMASK(23, 16)
#define ASPEED_MCR_INTR_CTRL_CNT_UNREC  GENMASK(15, 12)
#define ASPEED_MCR_INTR_CTRL_ENABLE     (BIT(0) | BIT(1))


static struct regmap *aspeed_regmap;


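/*
 * The MCR register set is write-protected: writes only take effect while the
 * protection key register holds the password value, so every register write
 * below is wrapped in an unlock/lock sequence.
 */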
static int regmap_reg_write(void *context, unsigned int reg, unsigned int val)
{
	void __iomem *regs = (void __iomem *)context;

	/* enable write to MCR register set */
	writel(ASPEED_MCR_PROT_PASSWD, regs + ASPEED_MCR_PROT);

	writel(val, regs + reg);

	/* disable write to MCR register set */
	writel(~ASPEED_MCR_PROT_PASSWD, regs + ASPEED_MCR_PROT);

	return 0;
}


static int regmap_reg_read(void *context, unsigned int reg, unsigned int *val)
{
	void __iomem *regs = (void __iomem *)context;

	*val = readl(regs + reg);

	return 0;
}

static bool regmap_is_volatile(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case ASPEED_MCR_PROT:
	case ASPEED_MCR_INTR_CTRL:
	case ASPEED_MCR_ADDR_UNREC:
	case ASPEED_MCR_ADDR_REC:
		return true;
	default:
		return false;
	}
}


static const struct regmap_config aspeed_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = ASPEED_MCR_LAST,
	.reg_write = regmap_reg_write,
	.reg_read = regmap_reg_read,
	.volatile_reg = regmap_is_volatile,
	.fast_io = true,
};


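/*
 * The controller keeps an 8-bit count of recoverable errors and a 4-bit
 * count of unrecoverable ones, but it only latches the address of the last
 * recoverable and the first unrecoverable error; all other errors have to
 * be reported without an address.
 */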
static void count_rec(struct mem_ctl_info *mci, u8 rec_cnt, u32 rec_addr)
{
	struct csrow_info *csrow = mci->csrows[0];
	u32 page, offset, syndrome;

	if (!rec_cnt)
		return;

	/* report all errors but the last one */
	/* note: no addresses are recorded for them */
	if (rec_cnt > 1) {
		/* page, offset and syndrome are not available */
		page = 0;
		offset = 0;
		syndrome = 0;
		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, rec_cnt - 1,
				     page, offset, syndrome, 0, 0, -1,
				     "address(es) not available", "");
	}

	/* report the last error */
	/* note: rec_addr is the address of the last recoverable error */
	page = rec_addr >> PAGE_SHIFT;
	offset = rec_addr & ~PAGE_MASK;
	/* syndrome is not available */
	syndrome = 0;
	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
			     csrow->first_page + page, offset, syndrome,
			     0, 0, -1, "", "");
}


static void count_un_rec(struct mem_ctl_info *mci, u8 un_rec_cnt,
			 u32 un_rec_addr)
{
	struct csrow_info *csrow = mci->csrows[0];
	u32 page, offset, syndrome;

	if (!un_rec_cnt)
		return;

	/* report the first error */
	/* note: un_rec_addr is the address of the first unrecoverable error */
	page = un_rec_addr >> PAGE_SHIFT;
	offset = un_rec_addr & ~PAGE_MASK;
	/* syndrome is not available */
	syndrome = 0;
	edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
			     csrow->first_page + page, offset, syndrome,
			     0, 0, -1, "", "");

	/* report the remaining errors (if any) */
	/* note: no addresses are recorded for them */
	if (un_rec_cnt > 1) {
		/* page, offset and syndrome are not available */
		page = 0;
		offset = 0;
		syndrome = 0;
		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, un_rec_cnt - 1,
				     page, offset, syndrome, 0, 0, -1,
				     "address(es) not available", "");
	}
}


static irqreturn_t mcr_isr(int irq, void *arg)
{
	struct mem_ctl_info *mci = arg;
	u32 rec_addr, un_rec_addr;
	u32 reg50, reg5c, reg58;
	u8 rec_cnt, un_rec_cnt;

	regmap_read(aspeed_regmap, ASPEED_MCR_INTR_CTRL, &reg50);
	dev_dbg(mci->pdev, "received edac interrupt w/ mcr register 50: 0x%x\n",
		reg50);

	/* collect data about recoverable and unrecoverable errors */
	rec_cnt = (reg50 & ASPEED_MCR_INTR_CTRL_CNT_REC) >> 16;
	un_rec_cnt = (reg50 & ASPEED_MCR_INTR_CTRL_CNT_UNREC) >> 12;

	dev_dbg(mci->pdev, "%d recoverable interrupts and %d unrecoverable interrupts\n",
		rec_cnt, un_rec_cnt);

	regmap_read(aspeed_regmap, ASPEED_MCR_ADDR_UNREC, &reg58);
	un_rec_addr = reg58;

	regmap_read(aspeed_regmap, ASPEED_MCR_ADDR_REC, &reg5c);
	rec_addr = reg5c;

	/* clear interrupt flags and error counters: */
	regmap_update_bits(aspeed_regmap, ASPEED_MCR_INTR_CTRL,
			   ASPEED_MCR_INTR_CTRL_CLEAR,
			   ASPEED_MCR_INTR_CTRL_CLEAR);

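	/* then de-assert the clear bit again so it does not stay set */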
	regmap_update_bits(aspeed_regmap, ASPEED_MCR_INTR_CTRL,
			   ASPEED_MCR_INTR_CTRL_CLEAR, 0);

	/* process recoverable and unrecoverable errors */
	count_rec(mci, rec_cnt, rec_addr);
	count_un_rec(mci, un_rec_cnt, un_rec_addr);

	if (!rec_cnt && !un_rec_cnt)
		dev_dbg(mci->pdev, "received edac interrupt, but did not find any ECC counters\n");

	regmap_read(aspeed_regmap, ASPEED_MCR_INTR_CTRL, &reg50);
	dev_dbg(mci->pdev, "edac interrupt handled. mcr reg 50 is now: 0x%x\n",
		reg50);

	return IRQ_HANDLED;
}


static int config_irq(void *ctx, struct platform_device *pdev)
{
	int irq;
	int rc;

	/* register interrupt handler */
	irq = platform_get_irq(pdev, 0);
	dev_dbg(&pdev->dev, "got irq %d\n", irq);
	if (irq < 0)
		return irq;

	rc = devm_request_irq(&pdev->dev, irq, mcr_isr, IRQF_TRIGGER_HIGH,
			      DRV_NAME, ctx);
	if (rc) {
		dev_err(&pdev->dev, "unable to request irq %d\n", irq);
		return rc;
	}

	/* enable interrupts */
	regmap_update_bits(aspeed_regmap, ASPEED_MCR_INTR_CTRL,
			   ASPEED_MCR_INTR_CTRL_ENABLE,
			   ASPEED_MCR_INTR_CTRL_ENABLE);

	return 0;
}


static int init_csrows(struct mem_ctl_info *mci)
{
	struct csrow_info *csrow = mci->csrows[0];
	u32 nr_pages, dram_type;
	struct dimm_info *dimm;
	struct device_node *np;
	struct resource r;
	u32 reg04;
	int rc;

	/* retrieve info about physical memory from device tree */
	np = of_find_node_by_path("/memory");
	if (!np) {
		dev_err(mci->pdev, "dt: missing /memory node\n");
		return -ENODEV;
	}

	rc = of_address_to_resource(np, 0, &r);

	of_node_put(np);

	if (rc) {
		dev_err(mci->pdev, "dt: failed requesting resource for /memory node\n");
		return rc;
	}

	dev_dbg(mci->pdev, "dt: /memory node resources: first page r.start=0x%x, resource_size=0x%x, PAGE_SHIFT macro=0x%x\n",
		r.start, resource_size(&r), PAGE_SHIFT);

	csrow->first_page = r.start >> PAGE_SHIFT;
	nr_pages = resource_size(&r) >> PAGE_SHIFT;
	csrow->last_page = csrow->first_page + nr_pages - 1;

	regmap_read(aspeed_regmap, ASPEED_MCR_CONF, &reg04);
	dram_type = (reg04 & ASPEED_MCR_CONF_DRAM_TYPE) ? MEM_DDR4 : MEM_DDR3;

	dimm = csrow->channels[0]->dimm;
	dimm->mtype = dram_type;
	dimm->edac_mode = EDAC_SECDED;
	dimm->nr_pages = nr_pages / csrow->nr_channels;

	dev_dbg(mci->pdev, "initialized dimm with first_page=0x%lx and nr_pages=0x%x\n",
		csrow->first_page, nr_pages);

	return 0;
}


static int aspeed_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct edac_mc_layer layers[2];
	struct mem_ctl_info *mci;
	void __iomem *regs;
	u32 reg04;
	int rc;

	regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	aspeed_regmap = devm_regmap_init(dev, NULL, (__force void *)regs,
					 &aspeed_regmap_config);
	if (IS_ERR(aspeed_regmap))
		return PTR_ERR(aspeed_regmap);

	/* bail out if ECC mode is not configured */
	regmap_read(aspeed_regmap, ASPEED_MCR_CONF, &reg04);
	if (!(reg04 & ASPEED_MCR_CONF_ECC)) {
		dev_err(&pdev->dev, "ECC mode is not configured in u-boot\n");
		return -EPERM;
	}

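	/* interrupt-driven operation: errors are reported from the ISR, not by polling */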
	edac_op_state = EDAC_OPSTATE_INT;

	/* allocate & init EDAC MC data structure */
	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = 1;
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = 1;
	layers[1].is_virt_csrow = false;

	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
	if (!mci)
		return -ENOMEM;

	mci->pdev = &pdev->dev;
	mci->mtype_cap = MEM_FLAG_DDR3 | MEM_FLAG_DDR4;
	mci->edac_ctl_cap = EDAC_FLAG_SECDED;
	mci->edac_cap = EDAC_FLAG_SECDED;
	mci->scrub_cap = SCRUB_FLAG_HW_SRC;
	mci->scrub_mode = SCRUB_HW_SRC;
	mci->mod_name = DRV_NAME;
	mci->ctl_name = "MIC";
	mci->dev_name = dev_name(&pdev->dev);

	rc = init_csrows(mci);
	if (rc) {
		dev_err(&pdev->dev, "failed to init csrows\n");
		goto probe_exit02;
	}

	platform_set_drvdata(pdev, mci);

	/* register with edac core */
	rc = edac_mc_add_mc(mci);
	if (rc) {
		dev_err(&pdev->dev, "failed to register with EDAC core\n");
		goto probe_exit02;
	}

	/* register interrupt handler and enable interrupts */
	rc = config_irq(mci, pdev);
	if (rc) {
		dev_err(&pdev->dev, "failed setting up irq\n");
		goto probe_exit01;
	}

	return 0;

probe_exit01:
	edac_mc_del_mc(&pdev->dev);
probe_exit02:
	edac_mc_free(mci);
	return rc;
}


static int aspeed_remove(struct platform_device *pdev)
{
	struct mem_ctl_info *mci;

	/* disable interrupts */
	regmap_update_bits(aspeed_regmap, ASPEED_MCR_INTR_CTRL,
			   ASPEED_MCR_INTR_CTRL_ENABLE, 0);

	/* free resources */
	mci = edac_mc_del_mc(&pdev->dev);
	if (mci)
		edac_mc_free(mci);

	return 0;
}


static const struct of_device_id aspeed_of_match[] = {
	{ .compatible = "aspeed,ast2500-sdram-edac" },
	{},
};


static struct platform_driver aspeed_driver = {
	.driver = {
		.name = DRV_NAME,
		.of_match_table = aspeed_of_match
	},
	.probe = aspeed_probe,
	.remove = aspeed_remove
};
module_platform_driver(aspeed_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stefan Schaeckeler <sschaeck@cisco.com>");
MODULE_DESCRIPTION("Aspeed AST2500 EDAC driver");
MODULE_VERSION("1.0");