Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * Copyright 2011-2012 Calxeda, Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7) #include <linux/ctype.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) #include <linux/edac.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <linux/of_platform.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include "edac_module.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) /* DDR Ctrlr Error Registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #define HB_DDR_ECC_ERR_BASE		0x128
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #define MW_DDR_ECC_ERR_BASE		0x1b4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) #define HB_DDR_ECC_OPT			0x00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) #define HB_DDR_ECC_U_ERR_ADDR		0x08
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) #define HB_DDR_ECC_U_ERR_STAT		0x0c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) #define HB_DDR_ECC_U_ERR_DATAL		0x10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) #define HB_DDR_ECC_U_ERR_DATAH		0x14
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) #define HB_DDR_ECC_C_ERR_ADDR		0x18
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) #define HB_DDR_ECC_C_ERR_STAT		0x1c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) #define HB_DDR_ECC_C_ERR_DATAL		0x20
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) #define HB_DDR_ECC_C_ERR_DATAH		0x24
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) #define HB_DDR_ECC_OPT_MODE_MASK	0x3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) #define HB_DDR_ECC_OPT_FWC		0x100
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) #define HB_DDR_ECC_OPT_XOR_SHIFT	16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) /* DDR Ctrlr Interrupt Registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) #define HB_DDR_ECC_INT_BASE		0x180
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) #define MW_DDR_ECC_INT_BASE		0x218
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) #define HB_DDR_ECC_INT_STATUS		0x00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) #define HB_DDR_ECC_INT_ACK		0x04
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) #define HB_DDR_ECC_INT_STAT_CE		0x8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) #define HB_DDR_ECC_INT_STAT_DOUBLE_CE	0x10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) #define HB_DDR_ECC_INT_STAT_UE		0x20
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) #define HB_DDR_ECC_INT_STAT_DOUBLE_UE	0x40
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) 
/* Per-controller driver state: ioremapped bases of the two register groups. */
struct hb_mc_drvdata {
	void __iomem *mc_err_base;	/* ECC error/status register block */
	void __iomem *mc_int_base;	/* interrupt status/ack register block */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) static irqreturn_t highbank_mc_err_handler(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) 	struct mem_ctl_info *mci = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) 	struct hb_mc_drvdata *drvdata = mci->pvt_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) 	u32 status, err_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) 	/* Read the interrupt status register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) 	status = readl(drvdata->mc_int_base + HB_DDR_ECC_INT_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) 	if (status & HB_DDR_ECC_INT_STAT_UE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) 		err_addr = readl(drvdata->mc_err_base + HB_DDR_ECC_U_ERR_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) 		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) 				     err_addr >> PAGE_SHIFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) 				     err_addr & ~PAGE_MASK, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) 				     0, 0, -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) 				     mci->ctl_name, "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) 	if (status & HB_DDR_ECC_INT_STAT_CE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) 		u32 syndrome = readl(drvdata->mc_err_base + HB_DDR_ECC_C_ERR_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) 		syndrome = (syndrome >> 8) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) 		err_addr = readl(drvdata->mc_err_base + HB_DDR_ECC_C_ERR_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) 		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) 				     err_addr >> PAGE_SHIFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) 				     err_addr & ~PAGE_MASK, syndrome,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) 				     0, 0, -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 				     mci->ctl_name, "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) 	/* clear the error, clears the interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 	writel(status, drvdata->mc_int_base + HB_DDR_ECC_INT_ACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) static void highbank_mc_err_inject(struct mem_ctl_info *mci, u8 synd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) 	struct hb_mc_drvdata *pdata = mci->pvt_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 	u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) 	reg = readl(pdata->mc_err_base + HB_DDR_ECC_OPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) 	reg &= HB_DDR_ECC_OPT_MODE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) 	reg |= (synd << HB_DDR_ECC_OPT_XOR_SHIFT) | HB_DDR_ECC_OPT_FWC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) 	writel(reg, pdata->mc_err_base + HB_DDR_ECC_OPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) #define to_mci(k) container_of(k, struct mem_ctl_info, dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) static ssize_t highbank_mc_inject_ctrl(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) 	struct device_attribute *attr, const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 	struct mem_ctl_info *mci = to_mci(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) 	u8 synd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) 	if (kstrtou8(buf, 16, &synd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 	highbank_mc_err_inject(mci, synd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) static DEVICE_ATTR(inject_ctrl, S_IWUSR, NULL, highbank_mc_inject_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 
/* Extra sysfs attributes exported on the EDAC MC device (error injection). */
static struct attribute *highbank_dev_attrs[] = {
	&dev_attr_inject_ctrl.attr,
	NULL
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) ATTRIBUTE_GROUPS(highbank_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 
/* Per-SoC register-block offsets (Highbank vs. Midway layouts differ). */
struct hb_mc_settings {
	int	err_offset;	/* offset of the ECC error register block */
	int	int_offset;	/* offset of the interrupt register block */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) static struct hb_mc_settings hb_settings = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 	.err_offset = HB_DDR_ECC_ERR_BASE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 	.int_offset = HB_DDR_ECC_INT_BASE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) static struct hb_mc_settings mw_settings = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) 	.err_offset = MW_DDR_ECC_ERR_BASE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 	.int_offset = MW_DDR_ECC_INT_BASE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) 
/* DT match table: selects the per-SoC register offsets via .data. */
static const struct of_device_id hb_ddr_ctrl_of_match[] = {
	{ .compatible = "calxeda,hb-ddr-ctrl",		.data = &hb_settings },
	{ .compatible = "calxeda,ecx-2000-ddr-ctrl",	.data = &mw_settings },
	{},
};
MODULE_DEVICE_TABLE(of, hb_ddr_ctrl_of_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) static int highbank_mc_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 	const struct of_device_id *id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 	const struct hb_mc_settings *settings;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 	struct edac_mc_layer layers[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 	struct mem_ctl_info *mci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 	struct hb_mc_drvdata *drvdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 	struct dimm_info *dimm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 	struct resource *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 	void __iomem *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 	u32 control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) 	int irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 	int res = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 	id = of_match_device(hb_ddr_ctrl_of_match, &pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) 	if (!id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 	layers[0].size = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) 	layers[0].is_virt_csrow = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) 	layers[1].type = EDAC_MC_LAYER_CHANNEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) 	layers[1].size = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) 	layers[1].is_virt_csrow = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) 	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 			    sizeof(struct hb_mc_drvdata));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) 	if (!mci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) 	mci->pdev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 	drvdata = mci->pvt_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 	platform_set_drvdata(pdev, mci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 	if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) 	if (!r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) 		dev_err(&pdev->dev, "Unable to get mem resource\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) 		res = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) 	if (!devm_request_mem_region(&pdev->dev, r->start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) 				     resource_size(r), dev_name(&pdev->dev))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) 		dev_err(&pdev->dev, "Error while requesting mem region\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) 		res = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) 	base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) 	if (!base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) 		dev_err(&pdev->dev, "Unable to map regs\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) 		res = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) 	settings = id->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) 	drvdata->mc_err_base = base + settings->err_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) 	drvdata->mc_int_base = base + settings->int_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) 	control = readl(drvdata->mc_err_base + HB_DDR_ECC_OPT) & 0x3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) 	if (!control || (control == 0x2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) 		dev_err(&pdev->dev, "No ECC present, or ECC disabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) 		res = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) 	mci->mtype_cap = MEM_FLAG_DDR3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) 	mci->edac_cap = EDAC_FLAG_SECDED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) 	mci->mod_name = pdev->dev.driver->name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) 	mci->ctl_name = id->compatible;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) 	mci->dev_name = dev_name(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) 	mci->scrub_mode = SCRUB_SW_SRC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) 	/* Only a single 4GB DIMM is supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) 	dimm = *mci->dimms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) 	dimm->nr_pages = (~0UL >> PAGE_SHIFT) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) 	dimm->grain = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) 	dimm->dtype = DEV_X8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) 	dimm->mtype = MEM_DDR3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) 	dimm->edac_mode = EDAC_SECDED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) 	res = edac_mc_add_mc_with_groups(mci, highbank_dev_groups);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) 	if (res < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) 	irq = platform_get_irq(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) 	res = devm_request_irq(&pdev->dev, irq, highbank_mc_err_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) 			       0, dev_name(&pdev->dev), mci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) 	if (res < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) 		dev_err(&pdev->dev, "Unable to request irq %d\n", irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) 		goto err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) 	devres_close_group(&pdev->dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) err2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) 	edac_mc_del_mc(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) 	devres_release_group(&pdev->dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) 	edac_mc_free(mci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) 	return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) 
/*
 * Unbind: unregister the EDAC memory controller added in probe and free
 * its mem_ctl_info.  devm-managed resources (region, ioremap, irq) are
 * released automatically by the driver core.
 */
static int highbank_mc_remove(struct platform_device *pdev)
{
	struct mem_ctl_info *mci = platform_get_drvdata(pdev);

	edac_mc_del_mc(&pdev->dev);
	edac_mc_free(mci);
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) 
/* Platform driver binding the DT-probed DDR controller to this EDAC driver. */
static struct platform_driver highbank_mc_edac_driver = {
	.probe = highbank_mc_probe,
	.remove = highbank_mc_remove,
	.driver = {
		.name = "hb_mc_edac",
		.of_match_table = hb_ddr_ctrl_of_match,
	},
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) module_platform_driver(highbank_mc_edac_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) MODULE_LICENSE("GPL v2");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) MODULE_AUTHOR("Calxeda, Inc.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) MODULE_DESCRIPTION("EDAC Driver for Calxeda Highbank");