/*
 * Marvell MV64x60 Memory Controller kernel module for PPC platforms
 *
 * Author: Dave Jiang <djiang@mvista.com>
 *
 * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
 * the terms of the GNU General Public License version 2. This program
 * is licensed "as is" without any warranty of any kind, whether express
 * or implied.
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/edac.h>
#include <linux/gfp.h>

#include "edac_module.h"
#include "mv64x60_edac.h"

static const char *mv64x60_ctl_name = "MV64x60";
static int edac_dev_idx;
static int edac_pci_idx;
static int edac_mc_idx;

/*********************** PCI err device **********************************/
#ifdef CONFIG_PCI
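/*
 * Poll/check handler for the PCI error reporting block: dump the latched
 * error information, clear the cause register and report the event to the
 * EDAC core as either a parity or a non-parity PCI error.
 */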
static void mv64x60_pci_check(struct edac_pci_ctl_info *pci)
{
	struct mv64x60_pci_pdata *pdata = pci->pvt_info;
	u32 cause;

	cause = readl(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE);
	if (!cause)
		return;

	printk(KERN_ERR "Error in PCI %d Interface\n", pdata->pci_hose);
	printk(KERN_ERR "Cause register: 0x%08x\n", cause);
	printk(KERN_ERR "Address Low: 0x%08x\n",
	       readl(pdata->pci_vbase + MV64X60_PCI_ERROR_ADDR_LO));
	printk(KERN_ERR "Address High: 0x%08x\n",
	       readl(pdata->pci_vbase + MV64X60_PCI_ERROR_ADDR_HI));
	printk(KERN_ERR "Attribute: 0x%08x\n",
	       readl(pdata->pci_vbase + MV64X60_PCI_ERROR_ATTR));
	printk(KERN_ERR "Command: 0x%08x\n",
	       readl(pdata->pci_vbase + MV64X60_PCI_ERROR_CMD));
	writel(~cause, pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE);

	if (cause & MV64X60_PCI_PE_MASK)
		edac_pci_handle_pe(pci, pci->ctl_name);
	else
		edac_pci_handle_npe(pci, pci->ctl_name);
}

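/*
 * Interrupt handler: return IRQ_NONE if nothing is latched in the cause
 * register, otherwise delegate to mv64x60_pci_check().
 */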
static irqreturn_t mv64x60_pci_isr(int irq, void *dev_id)
{
	struct edac_pci_ctl_info *pci = dev_id;
	struct mv64x60_pci_pdata *pdata = pci->pvt_info;
	u32 val;

	val = readl(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE);
	if (!val)
		return IRQ_NONE;

	mv64x60_pci_check(pci);

	return IRQ_HANDLED;
}

/*
 * Bit 0 of MV64x60_PCIx_ERR_MASK does not exist on the 64360 and, because of
 * errata FEr-#11 and FEr-#16 for the 64460, it should be 0 on that chip as
 * well. IOW, don't set bit 0.
 */

/* Erratum FEr PCI-#16: clear bit 0 of PCI SERRn Mask reg. */
static int mv64x60_pci_fixup(struct platform_device *pdev)
{
	struct resource *r;
	void __iomem *pci_serr;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!r) {
		printk(KERN_ERR "%s: Unable to get resource for "
		       "PCI err regs\n", __func__);
		return -ENOENT;
	}

	pci_serr = ioremap(r->start, resource_size(r));
	if (!pci_serr)
		return -ENOMEM;

	writel(readl(pci_serr) & ~0x1, pci_serr);
	iounmap(pci_serr);

	return 0;
}

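/*
 * Probe one PCI host error reporting block: allocate an edac_pci_ctl_info,
 * map the error registers, apply the erratum fixup, clear and unmask the
 * error sources, register with the EDAC core and, in interrupt mode, hook
 * up the error IRQ.
 */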
static int mv64x60_pci_err_probe(struct platform_device *pdev)
{
	struct edac_pci_ctl_info *pci;
	struct mv64x60_pci_pdata *pdata;
	struct resource *r;
	int res = 0;

	if (!devres_open_group(&pdev->dev, mv64x60_pci_err_probe, GFP_KERNEL))
		return -ENOMEM;

	pci = edac_pci_alloc_ctl_info(sizeof(*pdata), "mv64x60_pci_err");
	if (!pci) {
		devres_release_group(&pdev->dev, mv64x60_pci_err_probe);
		return -ENOMEM;
	}

	pdata = pci->pvt_info;

	pdata->pci_hose = pdev->id;
	pdata->name = "mv64x60_pci_err";
	platform_set_drvdata(pdev, pci);
	pci->dev = &pdev->dev;
	pci->dev_name = dev_name(&pdev->dev);
	pci->mod_name = EDAC_MOD_STR;
	pci->ctl_name = pdata->name;

	if (edac_op_state == EDAC_OPSTATE_POLL)
		pci->edac_check = mv64x60_pci_check;

	pdata->edac_idx = edac_pci_idx++;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		printk(KERN_ERR "%s: Unable to get resource for "
		       "PCI err regs\n", __func__);
		res = -ENOENT;
		goto err;
	}

	if (!devm_request_mem_region(&pdev->dev,
				     r->start,
				     resource_size(r),
				     pdata->name)) {
		printk(KERN_ERR "%s: Error while requesting mem region\n",
		       __func__);
		res = -EBUSY;
		goto err;
	}

	pdata->pci_vbase = devm_ioremap(&pdev->dev,
					r->start,
					resource_size(r));
	if (!pdata->pci_vbase) {
		printk(KERN_ERR "%s: Unable to setup PCI err regs\n", __func__);
		res = -ENOMEM;
		goto err;
	}

	res = mv64x60_pci_fixup(pdev);
	if (res < 0) {
		printk(KERN_ERR "%s: PCI fixup failed\n", __func__);
		goto err;
	}

	writel(0, pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE);
	writel(0, pdata->pci_vbase + MV64X60_PCI_ERROR_MASK);
	writel(MV64X60_PCIx_ERR_MASK_VAL,
	       pdata->pci_vbase + MV64X60_PCI_ERROR_MASK);

	if (edac_pci_add_device(pci, pdata->edac_idx) > 0) {
		edac_dbg(3, "failed edac_pci_add_device()\n");
		res = -ENODEV;
		goto err;
	}

	if (edac_op_state == EDAC_OPSTATE_INT) {
		pdata->irq = platform_get_irq(pdev, 0);
		res = devm_request_irq(&pdev->dev,
				       pdata->irq,
				       mv64x60_pci_isr,
				       0,
				       "[EDAC] PCI err",
				       pci);
		if (res < 0) {
			printk(KERN_ERR "%s: Unable to request irq %d for "
			       "MV64x60 PCI ERR\n", __func__, pdata->irq);
			res = -ENODEV;
			goto err2;
		}
		printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for PCI Err\n",
		       pdata->irq);
	}

	devres_remove_group(&pdev->dev, mv64x60_pci_err_probe);

	/* get this far and it's successful */
	edac_dbg(3, "success\n");

	return 0;

err2:
	edac_pci_del_device(&pdev->dev);
err:
	edac_pci_free_ctl_info(pci);
	devres_release_group(&pdev->dev, mv64x60_pci_err_probe);
	return res;
}

static int mv64x60_pci_err_remove(struct platform_device *pdev)
{
	struct edac_pci_ctl_info *pci = platform_get_drvdata(pdev);

	edac_dbg(0, "\n");

	edac_pci_del_device(&pdev->dev);

	edac_pci_free_ctl_info(pci);

	return 0;
}

static struct platform_driver mv64x60_pci_err_driver = {
	.probe = mv64x60_pci_err_probe,
	.remove = mv64x60_pci_err_remove,
	.driver = {
		.name = "mv64x60_pci_err",
	}
};

#endif /* CONFIG_PCI */

/*********************** SRAM err device **********************************/
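/*
 * Check handler for the internal SRAM: dump and clear the latched error
 * state, then report it to the EDAC core as an uncorrectable error.
 */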
static void mv64x60_sram_check(struct edac_device_ctl_info *edac_dev)
{
	struct mv64x60_sram_pdata *pdata = edac_dev->pvt_info;
	u32 cause;

	cause = readl(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE);
	if (!cause)
		return;

	printk(KERN_ERR "Error in internal SRAM\n");
	printk(KERN_ERR "Cause register: 0x%08x\n", cause);
	printk(KERN_ERR "Address Low: 0x%08x\n",
	       readl(pdata->sram_vbase + MV64X60_SRAM_ERR_ADDR_LO));
	printk(KERN_ERR "Address High: 0x%08x\n",
	       readl(pdata->sram_vbase + MV64X60_SRAM_ERR_ADDR_HI));
	printk(KERN_ERR "Data Low: 0x%08x\n",
	       readl(pdata->sram_vbase + MV64X60_SRAM_ERR_DATA_LO));
	printk(KERN_ERR "Data High: 0x%08x\n",
	       readl(pdata->sram_vbase + MV64X60_SRAM_ERR_DATA_HI));
	printk(KERN_ERR "Parity: 0x%08x\n",
	       readl(pdata->sram_vbase + MV64X60_SRAM_ERR_PARITY));
	writel(0, pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE);

	edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
}

static irqreturn_t mv64x60_sram_isr(int irq, void *dev_id)
{
	struct edac_device_ctl_info *edac_dev = dev_id;
	struct mv64x60_sram_pdata *pdata = edac_dev->pvt_info;
	u32 cause;

	cause = readl(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE);
	if (!cause)
		return IRQ_NONE;

	mv64x60_sram_check(edac_dev);

	return IRQ_HANDLED;
}

static int mv64x60_sram_err_probe(struct platform_device *pdev)
{
	struct edac_device_ctl_info *edac_dev;
	struct mv64x60_sram_pdata *pdata;
	struct resource *r;
	int res = 0;

	if (!devres_open_group(&pdev->dev, mv64x60_sram_err_probe, GFP_KERNEL))
		return -ENOMEM;

	edac_dev = edac_device_alloc_ctl_info(sizeof(*pdata),
					      "sram", 1, NULL, 0, 0, NULL, 0,
					      edac_dev_idx);
	if (!edac_dev) {
		devres_release_group(&pdev->dev, mv64x60_sram_err_probe);
		return -ENOMEM;
	}

	pdata = edac_dev->pvt_info;
	pdata->name = "mv64x60_sram_err";
	edac_dev->dev = &pdev->dev;
	platform_set_drvdata(pdev, edac_dev);
	edac_dev->dev_name = dev_name(&pdev->dev);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		printk(KERN_ERR "%s: Unable to get resource for "
		       "SRAM err regs\n", __func__);
		res = -ENOENT;
		goto err;
	}

	if (!devm_request_mem_region(&pdev->dev,
				     r->start,
				     resource_size(r),
				     pdata->name)) {
		printk(KERN_ERR "%s: Error while requesting mem region\n",
		       __func__);
		res = -EBUSY;
		goto err;
	}

	pdata->sram_vbase = devm_ioremap(&pdev->dev,
					 r->start,
					 resource_size(r));
	if (!pdata->sram_vbase) {
		printk(KERN_ERR "%s: Unable to setup SRAM err regs\n",
		       __func__);
		res = -ENOMEM;
		goto err;
	}

	/* setup SRAM err registers */
	writel(0, pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE);

	edac_dev->mod_name = EDAC_MOD_STR;
	edac_dev->ctl_name = pdata->name;

	if (edac_op_state == EDAC_OPSTATE_POLL)
		edac_dev->edac_check = mv64x60_sram_check;

	pdata->edac_idx = edac_dev_idx++;

	if (edac_device_add_device(edac_dev) > 0) {
		edac_dbg(3, "failed edac_device_add_device()\n");
		res = -ENODEV;
		goto err;
	}

	if (edac_op_state == EDAC_OPSTATE_INT) {
		pdata->irq = platform_get_irq(pdev, 0);
		res = devm_request_irq(&pdev->dev,
				       pdata->irq,
				       mv64x60_sram_isr,
				       0,
				       "[EDAC] SRAM err",
				       edac_dev);
		if (res < 0) {
			printk(KERN_ERR
			       "%s: Unable to request irq %d for "
			       "MV64x60 SRAM ERR\n", __func__, pdata->irq);
			res = -ENODEV;
			goto err2;
		}

		printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for SRAM Err\n",
		       pdata->irq);
	}

	devres_remove_group(&pdev->dev, mv64x60_sram_err_probe);

	/* get this far and it's successful */
	edac_dbg(3, "success\n");

	return 0;

err2:
	edac_device_del_device(&pdev->dev);
err:
	devres_release_group(&pdev->dev, mv64x60_sram_err_probe);
	edac_device_free_ctl_info(edac_dev);
	return res;
}

static int mv64x60_sram_err_remove(struct platform_device *pdev)
{
	struct edac_device_ctl_info *edac_dev = platform_get_drvdata(pdev);

	edac_dbg(0, "\n");

	edac_device_del_device(&pdev->dev);
	edac_device_free_ctl_info(edac_dev);

	return 0;
}

static struct platform_driver mv64x60_sram_err_driver = {
	.probe = mv64x60_sram_err_probe,
	.remove = mv64x60_sram_err_remove,
	.driver = {
		.name = "mv64x60_sram_err",
	}
};

/*********************** CPU err device **********************************/
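/*
 * Check handler for the CPU interface. The error registers are split across
 * two register blocks: cpu_vbase[0] holds the address registers, while
 * cpu_vbase[1] holds the cause, data and parity registers. Dump and clear
 * the latched state and report an uncorrectable error.
 */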
static void mv64x60_cpu_check(struct edac_device_ctl_info *edac_dev)
{
	struct mv64x60_cpu_pdata *pdata = edac_dev->pvt_info;
	u32 cause;

	cause = readl(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE) &
		MV64x60_CPU_CAUSE_MASK;
	if (!cause)
		return;

	printk(KERN_ERR "Error on CPU interface\n");
	printk(KERN_ERR "Cause register: 0x%08x\n", cause);
	printk(KERN_ERR "Address Low: 0x%08x\n",
	       readl(pdata->cpu_vbase[0] + MV64x60_CPU_ERR_ADDR_LO));
	printk(KERN_ERR "Address High: 0x%08x\n",
	       readl(pdata->cpu_vbase[0] + MV64x60_CPU_ERR_ADDR_HI));
	printk(KERN_ERR "Data Low: 0x%08x\n",
	       readl(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_DATA_LO));
	printk(KERN_ERR "Data High: 0x%08x\n",
	       readl(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_DATA_HI));
	printk(KERN_ERR "Parity: 0x%08x\n",
	       readl(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_PARITY));
	writel(0, pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE);

	edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
}

static irqreturn_t mv64x60_cpu_isr(int irq, void *dev_id)
{
	struct edac_device_ctl_info *edac_dev = dev_id;
	struct mv64x60_cpu_pdata *pdata = edac_dev->pvt_info;
	u32 cause;

	cause = readl(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE) &
		MV64x60_CPU_CAUSE_MASK;
	if (!cause)
		return IRQ_NONE;

	mv64x60_cpu_check(edac_dev);

	return IRQ_HANDLED;
}

static int mv64x60_cpu_err_probe(struct platform_device *pdev)
{
	struct edac_device_ctl_info *edac_dev;
	struct resource *r;
	struct mv64x60_cpu_pdata *pdata;
	int res = 0;

	if (!devres_open_group(&pdev->dev, mv64x60_cpu_err_probe, GFP_KERNEL))
		return -ENOMEM;

	edac_dev = edac_device_alloc_ctl_info(sizeof(*pdata),
					      "cpu", 1, NULL, 0, 0, NULL, 0,
					      edac_dev_idx);
	if (!edac_dev) {
		devres_release_group(&pdev->dev, mv64x60_cpu_err_probe);
		return -ENOMEM;
	}

	pdata = edac_dev->pvt_info;
	pdata->name = "mv64x60_cpu_err";
	edac_dev->dev = &pdev->dev;
	platform_set_drvdata(pdev, edac_dev);
	edac_dev->dev_name = dev_name(&pdev->dev);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		printk(KERN_ERR "%s: Unable to get resource for "
		       "CPU err regs\n", __func__);
		res = -ENOENT;
		goto err;
	}

	if (!devm_request_mem_region(&pdev->dev,
				     r->start,
				     resource_size(r),
				     pdata->name)) {
		printk(KERN_ERR "%s: Error while requesting mem region\n",
		       __func__);
		res = -EBUSY;
		goto err;
	}

	pdata->cpu_vbase[0] = devm_ioremap(&pdev->dev,
					   r->start,
					   resource_size(r));
	if (!pdata->cpu_vbase[0]) {
		printk(KERN_ERR "%s: Unable to setup CPU err regs\n", __func__);
		res = -ENOMEM;
		goto err;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!r) {
		printk(KERN_ERR "%s: Unable to get resource for "
		       "CPU err regs\n", __func__);
		res = -ENOENT;
		goto err;
	}

	if (!devm_request_mem_region(&pdev->dev,
				     r->start,
				     resource_size(r),
				     pdata->name)) {
		printk(KERN_ERR "%s: Error while requesting mem region\n",
		       __func__);
		res = -EBUSY;
		goto err;
	}

	pdata->cpu_vbase[1] = devm_ioremap(&pdev->dev,
					   r->start,
					   resource_size(r));
	if (!pdata->cpu_vbase[1]) {
		printk(KERN_ERR "%s: Unable to setup CPU err regs\n", __func__);
		res = -ENOMEM;
		goto err;
	}

	/* setup CPU err registers */
	writel(0, pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE);
	writel(0, pdata->cpu_vbase[1] + MV64x60_CPU_ERR_MASK);
	writel(0x000000ff, pdata->cpu_vbase[1] + MV64x60_CPU_ERR_MASK);

	edac_dev->mod_name = EDAC_MOD_STR;
	edac_dev->ctl_name = pdata->name;
	if (edac_op_state == EDAC_OPSTATE_POLL)
		edac_dev->edac_check = mv64x60_cpu_check;

	pdata->edac_idx = edac_dev_idx++;

	if (edac_device_add_device(edac_dev) > 0) {
		edac_dbg(3, "failed edac_device_add_device()\n");
		res = -ENODEV;
		goto err;
	}

	if (edac_op_state == EDAC_OPSTATE_INT) {
		pdata->irq = platform_get_irq(pdev, 0);
		res = devm_request_irq(&pdev->dev,
				       pdata->irq,
				       mv64x60_cpu_isr,
				       0,
				       "[EDAC] CPU err",
				       edac_dev);
		if (res < 0) {
			printk(KERN_ERR
			       "%s: Unable to request irq %d for MV64x60 "
			       "CPU ERR\n", __func__, pdata->irq);
			res = -ENODEV;
			goto err2;
		}

		printk(KERN_INFO EDAC_MOD_STR
		       " acquired irq %d for CPU Err\n", pdata->irq);
	}

	devres_remove_group(&pdev->dev, mv64x60_cpu_err_probe);

	/* get this far and it's successful */
	edac_dbg(3, "success\n");

	return 0;

err2:
	edac_device_del_device(&pdev->dev);
err:
	devres_release_group(&pdev->dev, mv64x60_cpu_err_probe);
	edac_device_free_ctl_info(edac_dev);
	return res;
}

static int mv64x60_cpu_err_remove(struct platform_device *pdev)
{
	struct edac_device_ctl_info *edac_dev = platform_get_drvdata(pdev);

	edac_dbg(0, "\n");

	edac_device_del_device(&pdev->dev);
	edac_device_free_ctl_info(edac_dev);
	return 0;
}

static struct platform_driver mv64x60_cpu_err_driver = {
	.probe = mv64x60_cpu_err_probe,
	.remove = mv64x60_cpu_err_remove,
	.driver = {
		.name = "mv64x60_cpu_err",
	}
};

/*********************** DRAM err device **********************************/

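/*
 * Check handler for the SDRAM controller. A non-zero ECC error address
 * register means an error was latched: bit 0 distinguishes a correctable
 * single-bit error (clear) from an uncorrectable double-bit error (set),
 * and the remaining bits hold the failing address. The syndrome is the
 * XOR of the received and calculated ECC values.
 */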
static void mv64x60_mc_check(struct mem_ctl_info *mci)
{
	struct mv64x60_mc_pdata *pdata = mci->pvt_info;
	u32 reg;
	u32 err_addr;
	u32 sdram_ecc;
	u32 comp_ecc;
	u32 syndrome;

	reg = readl(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR);
	if (!reg)
		return;

	err_addr = reg & ~0x3;
	sdram_ecc = readl(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_RCVD);
	comp_ecc = readl(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CALC);
	syndrome = sdram_ecc ^ comp_ecc;

	/* first bit clear in ECC Err Reg, 1 bit error, correctable by HW */
	if (!(reg & 0x1))
		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
				     err_addr >> PAGE_SHIFT,
				     err_addr & ~PAGE_MASK, syndrome,
				     0, 0, -1,
				     mci->ctl_name, "");
	else	/* 2 bit error, UE */
		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
				     err_addr >> PAGE_SHIFT,
				     err_addr & ~PAGE_MASK, 0,
				     0, 0, -1,
				     mci->ctl_name, "");

	/* clear the error */
	writel(0, pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR);
}

static irqreturn_t mv64x60_mc_isr(int irq, void *dev_id)
{
	struct mem_ctl_info *mci = dev_id;
	struct mv64x60_mc_pdata *pdata = mci->pvt_info;
	u32 reg;

	reg = readl(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR);
	if (!reg)
		return IRQ_NONE;

	/* writing 0's to the ECC err addr in check function clears irq */
	mv64x60_mc_check(mci);

	return IRQ_HANDLED;
}

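/*
 * Read the total amount of memory from the "memory" node of the device
 * tree; the size is taken from the second cell of its "reg" property
 * (a base/size layout is assumed here).
 */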
static void get_total_mem(struct mv64x60_mc_pdata *pdata)
{
	struct device_node *np = NULL;
	const unsigned int *reg;

	np = of_find_node_by_type(NULL, "memory");
	if (!np)
		return;

	reg = of_get_property(np, "reg", NULL);
	if (reg)
		pdata->total_mem = reg[1];

	of_node_put(np);
}

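/*
 * The controller is presented as a single chip-select row with one channel;
 * fill in the dimm info (size, memory type, device width) from the SDRAM
 * config register and the device-tree memory size.
 */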
static void mv64x60_init_csrows(struct mem_ctl_info *mci,
				struct mv64x60_mc_pdata *pdata)
{
	struct csrow_info *csrow;
	struct dimm_info *dimm;
	u32 devtype;
	u32 ctl;

	get_total_mem(pdata);

	ctl = readl(pdata->mc_vbase + MV64X60_SDRAM_CONFIG);

	csrow = mci->csrows[0];
	dimm = csrow->channels[0]->dimm;

	dimm->nr_pages = pdata->total_mem >> PAGE_SHIFT;
	dimm->grain = 8;

	dimm->mtype = (ctl & MV64X60_SDRAM_REGISTERED) ? MEM_RDDR : MEM_DDR;

	devtype = (ctl >> 20) & 0x3;
	switch (devtype) {
	case 0x0:
		dimm->dtype = DEV_X32;
		break;
	case 0x2:		/* could be X8 too, but no way to tell */
		dimm->dtype = DEV_X16;
		break;
	case 0x3:
		dimm->dtype = DEV_X4;
		break;
	default:
		dimm->dtype = DEV_UNKNOWN;
		break;
	}

	dimm->edac_mode = EDAC_SECDED;
}

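/*
 * Probe the SDRAM controller: map the error registers, bail out if the
 * controller is not running with ECC enabled, initialize the csrow/dimm
 * info, clear the error address register, program the ECC control register
 * and register the memory controller with the EDAC core.
 */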
static int mv64x60_mc_err_probe(struct platform_device *pdev)
{
	struct mem_ctl_info *mci;
	struct edac_mc_layer layers[2];
	struct mv64x60_mc_pdata *pdata;
	struct resource *r;
	u32 ctl;
	int res = 0;

	if (!devres_open_group(&pdev->dev, mv64x60_mc_err_probe, GFP_KERNEL))
		return -ENOMEM;

	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = 1;
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = 1;
	layers[1].is_virt_csrow = false;
	mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
			    sizeof(struct mv64x60_mc_pdata));
	if (!mci) {
		printk(KERN_ERR "%s: No memory for MC err\n", __func__);
		devres_release_group(&pdev->dev, mv64x60_mc_err_probe);
		return -ENOMEM;
	}

	pdata = mci->pvt_info;
	mci->pdev = &pdev->dev;
	platform_set_drvdata(pdev, mci);
	pdata->name = "mv64x60_mc_err";
	mci->dev_name = dev_name(&pdev->dev);
	pdata->edac_idx = edac_mc_idx++;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		printk(KERN_ERR "%s: Unable to get resource for "
		       "MC err regs\n", __func__);
		res = -ENOENT;
		goto err;
	}

	if (!devm_request_mem_region(&pdev->dev,
				     r->start,
				     resource_size(r),
				     pdata->name)) {
		printk(KERN_ERR "%s: Error while requesting mem region\n",
		       __func__);
		res = -EBUSY;
		goto err;
	}

	pdata->mc_vbase = devm_ioremap(&pdev->dev,
				       r->start,
				       resource_size(r));
	if (!pdata->mc_vbase) {
		printk(KERN_ERR "%s: Unable to setup MC err regs\n", __func__);
		res = -ENOMEM;
		goto err;
	}

	ctl = readl(pdata->mc_vbase + MV64X60_SDRAM_CONFIG);
	if (!(ctl & MV64X60_SDRAM_ECC)) {
		/* Non-ECC RAM? */
		printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__);
		res = -ENODEV;
		goto err;
	}

	edac_dbg(3, "init mci\n");
	mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR;
	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
	mci->edac_cap = EDAC_FLAG_SECDED;
	mci->mod_name = EDAC_MOD_STR;
	mci->ctl_name = mv64x60_ctl_name;

	if (edac_op_state == EDAC_OPSTATE_POLL)
		mci->edac_check = mv64x60_mc_check;

	mci->ctl_page_to_phys = NULL;

	mci->scrub_mode = SCRUB_SW_SRC;

	mv64x60_init_csrows(mci, pdata);

	/* setup MC registers */
	writel(0, pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR);
	ctl = readl(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CNTL);
	ctl = (ctl & 0xff00ffff) | 0x10000;
	writel(ctl, pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CNTL);

	res = edac_mc_add_mc(mci);
	if (res) {
		edac_dbg(3, "failed edac_mc_add_mc()\n");
		goto err;
	}

	if (edac_op_state == EDAC_OPSTATE_INT) {
		/* acquire interrupt that reports errors */
		pdata->irq = platform_get_irq(pdev, 0);
		res = devm_request_irq(&pdev->dev,
				       pdata->irq,
				       mv64x60_mc_isr,
				       0,
				       "[EDAC] MC err",
				       mci);
		if (res < 0) {
			printk(KERN_ERR "%s: Unable to request irq %d for "
			       "MV64x60 DRAM ERR\n", __func__, pdata->irq);
			res = -ENODEV;
			goto err2;
		}

		printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for MC Err\n",
		       pdata->irq);
	}

	/* get this far and it's successful */
	edac_dbg(3, "success\n");

	return 0;

err2:
	edac_mc_del_mc(&pdev->dev);
err:
	devres_release_group(&pdev->dev, mv64x60_mc_err_probe);
	edac_mc_free(mci);
	return res;
}

static int mv64x60_mc_err_remove(struct platform_device *pdev)
{
	struct mem_ctl_info *mci = platform_get_drvdata(pdev);

	edac_dbg(0, "\n");

	edac_mc_del_mc(&pdev->dev);
	edac_mc_free(mci);
	return 0;
}

static struct platform_driver mv64x60_mc_err_driver = {
	.probe = mv64x60_mc_err_probe,
	.remove = mv64x60_mc_err_remove,
	.driver = {
		.name = "mv64x60_mc_err",
	}
};

static struct platform_driver * const drivers[] = {
	&mv64x60_mc_err_driver,
	&mv64x60_cpu_err_driver,
	&mv64x60_sram_err_driver,
#ifdef CONFIG_PCI
	&mv64x60_pci_err_driver,
#endif
};

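/*
 * Module init: force a sane error reporting mode (polling or interrupt)
 * and register the platform drivers for the MC, CPU, SRAM and, when PCI
 * is enabled, PCI error reporting blocks.
 */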
static int __init mv64x60_edac_init(void)
{
	printk(KERN_INFO "Marvell MV64x60 EDAC driver " MV64x60_REVISION "\n");
	printk(KERN_INFO "\t(C) 2006-2007 MontaVista Software\n");

	/* make sure error reporting method is sane */
	switch (edac_op_state) {
	case EDAC_OPSTATE_POLL:
	case EDAC_OPSTATE_INT:
		break;
	default:
		edac_op_state = EDAC_OPSTATE_INT;
		break;
	}

	return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
}
module_init(mv64x60_edac_init);

static void __exit mv64x60_edac_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}
module_exit(mv64x60_edac_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("MontaVista Software, Inc.");
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state,
		 "EDAC Error Reporting state: 0=Poll, 2=Interrupt");