/*
 * Intel e752x Memory Controller kernel module
 * (C) 2004 Linux Networx (http://lnxi.com)
 * This file may be distributed under the terms of the
 * GNU General Public License.
 *
 * Implement support for the E7520, E7525, E7320 and i3100 memory controllers.
 *
 * Datasheets:
 *	https://www.intel.in/content/www/in/en/chipsets/e7525-memory-controller-hub-datasheet.html
 *	ftp://download.intel.com/design/intarch/datashts/31345803.pdf
 *
 * Written by Tom Zimmerman
 *
 * Contributors:
 *	Thayne Harbaugh at realmsys.com (?)
 *	Wang Zhenyu at intel.com
 *	Dave Jiang at mvista.com
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/edac.h>
#include "edac_module.h"

#define EDAC_MOD_STR	"e752x_edac"

static int report_non_memory_errors;
static int force_function_unhide;
static int sysbus_parity = -1;

static struct edac_pci_ctl_info *e752x_pci;

#define e752x_printk(level, fmt, arg...) \
	edac_printk(level, "e752x", fmt, ##arg)

#define e752x_mc_printk(mci, level, fmt, arg...) \
	edac_mc_chipset_printk(mci, level, "e752x", fmt, ##arg)

#ifndef PCI_DEVICE_ID_INTEL_7520_0
#define PCI_DEVICE_ID_INTEL_7520_0	0x3590
#endif	/* PCI_DEVICE_ID_INTEL_7520_0 */

#ifndef PCI_DEVICE_ID_INTEL_7520_1_ERR
#define PCI_DEVICE_ID_INTEL_7520_1_ERR	0x3591
#endif	/* PCI_DEVICE_ID_INTEL_7520_1_ERR */

#ifndef PCI_DEVICE_ID_INTEL_7525_0
#define PCI_DEVICE_ID_INTEL_7525_0	0x359E
#endif	/* PCI_DEVICE_ID_INTEL_7525_0 */

#ifndef PCI_DEVICE_ID_INTEL_7525_1_ERR
#define PCI_DEVICE_ID_INTEL_7525_1_ERR	0x3593
#endif	/* PCI_DEVICE_ID_INTEL_7525_1_ERR */

#ifndef PCI_DEVICE_ID_INTEL_7320_0
#define PCI_DEVICE_ID_INTEL_7320_0	0x3592
#endif	/* PCI_DEVICE_ID_INTEL_7320_0 */

#ifndef PCI_DEVICE_ID_INTEL_7320_1_ERR
#define PCI_DEVICE_ID_INTEL_7320_1_ERR	0x3593
#endif	/* PCI_DEVICE_ID_INTEL_7320_1_ERR */

#ifndef PCI_DEVICE_ID_INTEL_3100_0
#define PCI_DEVICE_ID_INTEL_3100_0	0x35B0
#endif	/* PCI_DEVICE_ID_INTEL_3100_0 */

#ifndef PCI_DEVICE_ID_INTEL_3100_1_ERR
#define PCI_DEVICE_ID_INTEL_3100_1_ERR	0x35B1
#endif	/* PCI_DEVICE_ID_INTEL_3100_1_ERR */

#define E752X_NR_CSROWS		8	/* number of csrows */

/* E752X register addresses - device 0 function 0 */
#define E752X_MCHSCRB		0x52	/* Memory Scrub register (16b) */
					/*
					 * 6:5	Scrub Completion Count
					 * 3:2	Scrub Rate (i3100 only)
					 *	01=fast 10=normal
					 * 1:0	Scrub Mode enable
					 *	00=off 10=on
					 */
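/*
 * Example (illustrative only, not part of the driver flow): reading and
 * decoding E752X_MCHSCRB with the generic PCI config accessors.  "pdev"
 * below is assumed to be the device 0 function 0 pci_dev.
 *
 *	u16 mchscrb;
 *
 *	pci_read_config_word(pdev, E752X_MCHSCRB, &mchscrb);
 *	scrub_enabled = (mchscrb & 0x03) == 0x02;	(bits 1:0, 10=on)
 *	scrub_fast    = (mchscrb & 0x0c) == 0x04;	(bits 3:2, i3100 only)
 */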
#define E752X_DRB		0x60	/* DRAM row boundary register (8b) */
#define E752X_DRA		0x70	/* DRAM row attribute register (8b) */
					/*
					 * 31:30	Device width row 7
					 *	01=x8 10=x4 11=x8 DDR2
					 * 27:26	Device width row 6
					 * 23:22	Device width row 5
					 * 19:18	Device width row 4
					 * 15:14	Device width row 3
					 * 11:10	Device width row 2
					 * 7:6		Device width row 1
					 * 3:2		Device width row 0
					 */
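/*
 * Example (illustrative only): treating the four DRA bytes at 0x70-0x73 as
 * one little-endian 32-bit value, the width field for row N occupies bits
 * (4 * N + 3):(4 * N + 2).  "pdev" and "row" are assumed names here.
 *
 *	u32 dra;
 *
 *	pci_read_config_dword(pdev, E752X_DRA, &dra);
 *	width_code = (dra >> (4 * row + 2)) & 0x3;	(01=x8, 10=x4)
 */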
#define E752X_DRC		0x7C	/* DRAM controller mode reg (32b) */
					/* FIXME:IS THIS RIGHT? */
					/*
					 * 22	Number channels 0=1,1=2
					 * 19:18 DRB Granularity 32/64MB
					 */
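/*
 * Example (illustrative only, and subject to the FIXME above): pulling the
 * fields out of a DRC value "drc".  The exact encoding of the granularity
 * bits is an assumption, not confirmed by this comment block.
 *
 *	channels    = ((drc >> 22) & 0x1) ? 2 : 1;
 *	granularity = (drc >> 18) & 0x3;	(selects 32MB or 64MB DRB steps)
 */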
#define E752X_DRM		0x80	/* Dimm mapping register */
#define E752X_DDRCSR		0x9A	/* DDR control and status reg (16b) */
					/*
					 * 14:12 1 single A, 2 single B, 3 dual
					 */
#define E752X_TOLM		0xC4	/* DRAM top of low memory reg (16b) */
#define E752X_REMAPBASE		0xC6	/* DRAM remap base address reg (16b) */
#define E752X_REMAPLIMIT	0xC8	/* DRAM remap limit address reg (16b) */
#define E752X_REMAPOFFSET	0xCA	/* DRAM remap limit offset reg (16b) */

/* E752X register addresses - device 0 function 1 */
#define E752X_FERR_GLOBAL	0x40	/* Global first error register (32b) */
#define E752X_NERR_GLOBAL	0x44	/* Global next error register (32b) */
#define E752X_HI_FERR		0x50	/* Hub interface first error reg (8b) */
#define E752X_HI_NERR		0x52	/* Hub interface next error reg (8b) */
#define E752X_HI_ERRMASK	0x54	/* Hub interface error mask reg (8b) */
#define E752X_HI_SMICMD		0x5A	/* Hub interface SMI command reg (8b) */
#define E752X_SYSBUS_FERR	0x60	/* System bus first error reg (16b) */
#define E752X_SYSBUS_NERR	0x62	/* System bus next error reg (16b) */
#define E752X_SYSBUS_ERRMASK	0x64	/* System bus error mask reg (16b) */
#define E752X_SYSBUS_SMICMD	0x6A	/* System bus SMI command reg (16b) */
#define E752X_BUF_FERR		0x70	/* Memory buffer first error reg (8b) */
#define E752X_BUF_NERR		0x72	/* Memory buffer next error reg (8b) */
#define E752X_BUF_ERRMASK	0x74	/* Memory buffer error mask reg (8b) */
#define E752X_BUF_SMICMD	0x7A	/* Memory buffer SMI cmd reg (8b) */
#define E752X_DRAM_FERR		0x80	/* DRAM first error register (16b) */
#define E752X_DRAM_NERR		0x82	/* DRAM next error register (16b) */
#define E752X_DRAM_ERRMASK	0x84	/* DRAM error mask register (8b) */
#define E752X_DRAM_SMICMD	0x8A	/* DRAM SMI command register (8b) */
#define E752X_DRAM_RETR_ADD	0xAC	/* DRAM Retry address register (32b) */
#define E752X_DRAM_SEC1_ADD	0xA0	/* DRAM first correctable memory */
					/*	error address register (32b) */
					/*
					 * 31	Reserved
					 * 30:2	CE address (64 byte block 34:6)
					 * 1	Reserved
					 * 0	HiLoCS
					 */
#define E752X_DRAM_SEC2_ADD	0xC8	/* DRAM second correctable memory */
					/*	error address register (32b) */
					/*
					 * 31	Reserved
					 * 30:2	CE address (64 byte block 34:6)
					 * 1	Reserved
					 * 0	HiLoCS
					 */
#define E752X_DRAM_DED_ADD	0xA4	/* DRAM first uncorrectable memory */
					/*	error address register (32b) */
					/*
					 * 31	Reserved
					 * 30:2	UE address (64 byte block 34:6)
					 * 1	Reserved
					 * 0	HiLoCS
					 */
#define E752X_DRAM_SCRB_ADD	0xA8	/* DRAM 1st uncorrectable scrub mem */
					/*	error address register (32b) */
					/*
					 * 31	Reserved
					 * 30:2	UE address (64 byte block 34:6)
					 * 1	Reserved
					 * 0	HiLoCS
					 */
#define E752X_DRAM_SEC1_SYNDROME 0xC4	/* DRAM first correctable memory */
					/*	error syndrome register (16b) */
#define E752X_DRAM_SEC2_SYNDROME 0xC6	/* DRAM second correctable memory */
					/*	error syndrome register (16b) */
#define E752X_DEVPRES1		0xF4	/* Device Present 1 register (8b) */

/* 3100 IMCH specific register addresses - device 0 function 1 */
#define I3100_NSI_FERR		0x48	/* NSI first error reg (32b) */
#define I3100_NSI_NERR		0x4C	/* NSI next error reg (32b) */
#define I3100_NSI_SMICMD	0x54	/* NSI SMI command register (32b) */
#define I3100_NSI_EMASK		0x90	/* NSI error mask register (32b) */

/* ICH5R register addresses - device 30 function 0 */
#define ICH5R_PCI_STAT		0x06	/* PCI status register (16b) */
#define ICH5R_PCI_2ND_STAT	0x1E	/* PCI status secondary reg (16b) */
#define ICH5R_PCI_BRIDGE_CTL	0x3E	/* PCI bridge control register (16b) */

enum e752x_chips {
	E7520 = 0,
	E7525 = 1,
	E7320 = 2,
	I3100 = 3
};

/*
 * These chips support single-rank and dual-rank memories only.
 *
 * On e752x chips, the odd rows are present only on dual-rank memories.
 * Dividing the rank by two will provide the dimm#
 *
 * i3100 MC has a different mapping: it supports only 4 ranks.
 *
 * The mapping is (from 1 to n):
 *	slot		single-ranked	double-ranked
 *	dimm #1	->	rank #4		NA
 *	dimm #2	->	rank #3		NA
 *	dimm #3	->	rank #2		Ranks 2 and 3
 *	dimm #4	->	rank #1		Ranks 1 and 4
 *
 * FIXME: The current mapping for i3100 assumes that it supports up to 8
 * ranks/channel, but the datasheet says that the MC supports only 4 ranks.
 */
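
/*
 * Example (illustrative only): on the e752x mapping described above, the
 * DIMM socket for a given rank index follows directly from the "divide by
 * two" rule.  "csrow" is assumed here to be a 0-based rank index.
 *
 *	dimm = csrow >> 1;	(ranks 0/1 -> dimm 0, ranks 2/3 -> dimm 1, ...)
 */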

struct e752x_pvt {
	struct pci_dev *dev_d0f0;
	struct pci_dev *dev_d0f1;
	u32 tolm;
	u32 remapbase;
	u32 remaplimit;
	int mc_symmetric;
	u8 map[8];
	int map_type;
	const struct e752x_dev_info *dev_info;
};

struct e752x_dev_info {
	u16 err_dev;
	u16 ctl_dev;
	const char *ctl_name;
};

struct e752x_error_info {
	u32 ferr_global;
	u32 nerr_global;
	u32 nsi_ferr;	/* 3100 only */
	u32 nsi_nerr;	/* 3100 only */
	u8 hi_ferr;	/* all but 3100 */
	u8 hi_nerr;	/* all but 3100 */
	u16 sysbus_ferr;
	u16 sysbus_nerr;
	u8 buf_ferr;
	u8 buf_nerr;
	u16 dram_ferr;
	u16 dram_nerr;
	u32 dram_sec1_add;
	u32 dram_sec2_add;
	u16 dram_sec1_syndrome;
	u16 dram_sec2_syndrome;
	u32 dram_ded_add;
	u32 dram_scrb_add;
	u32 dram_retr_add;
};

static const struct e752x_dev_info e752x_devs[] = {
	[E7520] = {
		.err_dev = PCI_DEVICE_ID_INTEL_7520_1_ERR,
		.ctl_dev = PCI_DEVICE_ID_INTEL_7520_0,
		.ctl_name = "E7520"},
	[E7525] = {
		.err_dev = PCI_DEVICE_ID_INTEL_7525_1_ERR,
		.ctl_dev = PCI_DEVICE_ID_INTEL_7525_0,
		.ctl_name = "E7525"},
	[E7320] = {
		.err_dev = PCI_DEVICE_ID_INTEL_7320_1_ERR,
		.ctl_dev = PCI_DEVICE_ID_INTEL_7320_0,
		.ctl_name = "E7320"},
	[I3100] = {
		.err_dev = PCI_DEVICE_ID_INTEL_3100_1_ERR,
		.ctl_dev = PCI_DEVICE_ID_INTEL_3100_0,
		.ctl_name = "3100"},
};

/* Valid scrub rates for the e752x/3100 hardware memory scrubber. We
 * map the scrubbing bandwidth to a hardware register value. The 'set'
 * operation finds the 'matching or higher value'. Note that scrubbing
 * on the e752x can only be enabled/disabled. The 3100 supports
 * a normal and fast mode.
 */

#define SDRATE_EOT 0xFFFFFFFF

struct scrubrate {
	u32 bandwidth;	/* bandwidth consumed by scrubbing in bytes/sec */
	u16 scrubval;	/* register value for scrub rate */
};

/* Rate below assumes same performance as i3100 using PC3200 DDR2 in
 * normal mode. e752x bridges don't support choosing normal or fast mode,
 * so the scrubbing bandwidth value isn't all that important - scrubbing is
 * either on or off.
 */
static const struct scrubrate scrubrates_e752x[] = {
	{0,		0x00},	/* Scrubbing Off */
	{500000,	0x02},	/* Scrubbing On */
	{SDRATE_EOT,	0x00}	/* End of Table */
};

/* Fast mode: 2 GByte PC3200 DDR2 scrubbed in 33s = 63161283 bytes/s
 * Normal mode: 125 (32000 / 256) times slower than fast mode.
 */
static const struct scrubrate scrubrates_i3100[] = {
	{0,		0x00},	/* Scrubbing Off */
	{500000,	0x0a},	/* Normal mode - 32k clocks */
	{62500000,	0x06},	/* Fast mode - 256 clocks */
	{SDRATE_EOT,	0x00}	/* End of Table */
};
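
/*
 * Illustrative sketch (not the driver's actual scrub-rate setter): the
 * "matching or higher value" lookup described above walks the table until
 * it finds a bandwidth >= the requested rate or hits SDRATE_EOT.  "tab"
 * and "new_bw" are assumed names.
 *
 *	for (i = 0; tab[i].bandwidth != SDRATE_EOT; i++)
 *		if (tab[i].bandwidth >= new_bw)
 *			break;
 *	scrubval = tab[i].scrubval;	(the EOT entry falls back to 0x00, off)
 */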

static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci,
				unsigned long page)
{
	u32 remap;
	struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;

	edac_dbg(3, "\n");

	if (page < pvt->tolm)
		return page;

	if ((page >= 0x100000) && (page < pvt->remapbase))
		return page;

	remap = (page - pvt->tolm) + pvt->remapbase;

	if (remap < pvt->remaplimit)
		return remap;

	e752x_printk(KERN_ERR, "Invalid page %lx - out of range\n", page);
	return pvt->tolm - 1;
}
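
/*
 * Worked example for ctl_page_to_phys() (hypothetical values, 4k pages):
 * with tolm = 0xc0000 (3 GiB), remapbase = 0x100000 (4 GiB) and
 * remaplimit = 0x140000 (5 GiB), a page of 0xd0000 sits in the hole above
 * TOLM, so it is remapped to (0xd0000 - 0xc0000) + 0x100000 = 0x110000,
 * which is below remaplimit and therefore returned.  Pages below tolm, and
 * pages at or above 4 GiB but below remapbase, are returned unchanged.
 */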

static void do_process_ce(struct mem_ctl_info *mci, u16 error_one,
			u32 sec1_add, u16 sec1_syndrome)
{
	u32 page;
	int row;
	int channel;
	int i;
	struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;

	edac_dbg(3, "\n");

	/* convert the addr to 4k page */
	page = sec1_add >> (PAGE_SHIFT - 4);

	/* FIXME - check for -1 */
	if (pvt->mc_symmetric) {
		/* chip select is in bits 14 & 13 */
		row = ((page >> 1) & 3);
		e752x_printk(KERN_WARNING,
			"Test row %d Table %d %d %d %d %d %d %d %d\n", row,
			pvt->map[0], pvt->map[1], pvt->map[2], pvt->map[3],
			pvt->map[4], pvt->map[5], pvt->map[6],
			pvt->map[7]);

		/* test for channel remapping */
		for (i = 0; i < 8; i++) {
			if (pvt->map[i] == row)
				break;
		}

		e752x_printk(KERN_WARNING, "Test computed row %d\n", i);

		if (i < 8)
			row = i;
		else
			e752x_mc_printk(mci, KERN_WARNING,
					"row %d not found in remap table\n",
					row);
	} else
		row = edac_mc_find_csrow_by_page(mci, page);

	/* 0 = channel A, 1 = channel B */
	channel = !(error_one & 1);

	/* e752x mc reads 34:6 of the DRAM linear address */
	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
			     page, offset_in_page(sec1_add << 4), sec1_syndrome,
			     row, channel, -1,
			     "e752x CE", "");
}
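
/*
 * Note on the address math above: the DRAM error address registers hold
 * physical address bits 34:6 in register bits 30:2, i.e. the register
 * value is the physical address shifted right by 4.  With PAGE_SHIFT == 12
 * that makes the 4k page number "reg >> 8" and the offset within the page
 * "(reg << 4) & (PAGE_SIZE - 1)", which is what the conversions in the
 * CE/UE handlers compute.
 */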

static inline void process_ce(struct mem_ctl_info *mci, u16 error_one,
			u32 sec1_add, u16 sec1_syndrome, int *error_found,
			int handle_error)
{
	*error_found = 1;

	if (handle_error)
		do_process_ce(mci, error_one, sec1_add, sec1_syndrome);
}

static void do_process_ue(struct mem_ctl_info *mci, u16 error_one,
			u32 ded_add, u32 scrb_add)
{
	u32 error_2b, block_page;
	int row;
	struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;

	edac_dbg(3, "\n");

	if (error_one & 0x0202) {
		error_2b = ded_add;

		/* convert to 4k address */
		block_page = error_2b >> (PAGE_SHIFT - 4);

		row = pvt->mc_symmetric ?
		/* chip select is in bits 14 & 13 */
			((block_page >> 1) & 3) :
			edac_mc_find_csrow_by_page(mci, block_page);

		/* e752x mc reads 34:6 of the DRAM linear address */
		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
					block_page,
					offset_in_page(error_2b << 4), 0,
					row, -1, -1,
					"e752x UE from Read", "");

	}
	if (error_one & 0x0404) {
		error_2b = scrb_add;

		/* convert to 4k address */
		block_page = error_2b >> (PAGE_SHIFT - 4);

		row = pvt->mc_symmetric ?
		/* chip select is in bits 14 & 13 */
			((block_page >> 1) & 3) :
			edac_mc_find_csrow_by_page(mci, block_page);

		/* e752x mc reads 34:6 of the DRAM linear address */
		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
					block_page,
					offset_in_page(error_2b << 4), 0,
					row, -1, -1,
					"e752x UE from Scrubber", "");
	}
}

static inline void process_ue(struct mem_ctl_info *mci, u16 error_one,
			u32 ded_add, u32 scrb_add, int *error_found,
			int handle_error)
{
	*error_found = 1;

	if (handle_error)
		do_process_ue(mci, error_one, ded_add, scrb_add);
}

static inline void process_ue_no_info_wr(struct mem_ctl_info *mci,
					 int *error_found, int handle_error)
{
	*error_found = 1;

	if (!handle_error)
		return;

	edac_dbg(3, "\n");
	edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
			     -1, -1, -1,
			     "e752x UE log memory write", "");
}

static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error,
				 u32 retry_add)
{
	u32 error_1b, page;
	int row;
	struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;

	error_1b = retry_add;
	page = error_1b >> (PAGE_SHIFT - 4);	/* convert the addr to 4k page */

	/* chip select is in bits 14 & 13 */
	row = pvt->mc_symmetric ? ((page >> 1) & 3) :
		edac_mc_find_csrow_by_page(mci, page);

	e752x_mc_printk(mci, KERN_WARNING,
			"CE page 0x%lx, row %d : Memory read retry\n",
			(unsigned long)page, row);
}

static inline void process_ded_retry(struct mem_ctl_info *mci, u16 error,
				     u32 retry_add, int *error_found,
				     int handle_error)
{
	*error_found = 1;

	if (handle_error)
		do_process_ded_retry(mci, error, retry_add);
}

static inline void process_threshold_ce(struct mem_ctl_info *mci, u16 error,
					int *error_found, int handle_error)
{
	*error_found = 1;

	if (handle_error)
		e752x_mc_printk(mci, KERN_WARNING, "Memory threshold CE\n");
}

static char *global_message[11] = {
	"PCI Express C1",
	"PCI Express C",
	"PCI Express B1",
	"PCI Express B",
	"PCI Express A1",
	"PCI Express A",
	"DMA Controller",
	"HUB or NS Interface",
	"System Bus",
	"DRAM Controller",	/* index 9: DRAM_ENTRY */
	"Internal Buffer"
};

#define DRAM_ENTRY	9

static char *fatal_message[2] = { "Non-Fatal ", "Fatal " };

static void do_global_error(int fatal, u32 errors)
{
	int i;

	for (i = 0; i < 11; i++) {
		if (errors & (1 << i)) {
			/* Report the error if it came from the DRAM
			 * controller, or if we were asked to report
			 * all errors.
			 */
			if ((i == DRAM_ENTRY) || report_non_memory_errors)
				e752x_printk(KERN_WARNING, "%sError %s\n",
					     fatal_message[fatal],
					     global_message[i]);
		}
	}
}

static inline void global_error(int fatal, u32 errors, int *error_found,
				int handle_error)
{
	*error_found = 1;

	if (handle_error)
		do_global_error(fatal, errors);
}

static char *hub_message[7] = {
	"HI Address or Command Parity", "HI Illegal Access",
	"HI Internal Parity", "Out of Range Access",
	"HI Data Parity", "Enhanced Config Access",
	"Hub Interface Target Abort"
};

static void do_hub_error(int fatal, u8 errors)
{
	int i;

	for (i = 0; i < 7; i++) {
		if (errors & (1 << i))
			e752x_printk(KERN_WARNING, "%sError %s\n",
				     fatal_message[fatal], hub_message[i]);
	}
}

static inline void hub_error(int fatal, u8 errors, int *error_found,
			     int handle_error)
{
	*error_found = 1;

	if (handle_error)
		do_hub_error(fatal, errors);
}

#define NSI_FATAL_MASK		0x0c080081
#define NSI_NON_FATAL_MASK	0x23a0ba64
#define NSI_ERR_MASK		(NSI_FATAL_MASK | NSI_NON_FATAL_MASK)
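
/*
 * For reference: NSI_FATAL_MASK covers bits 0, 7, 19, 26 and 27 of
 * NSI_FERR/NSI_NERR (link down, fatal error message, data link protocol
 * error, receiver overflow, malformed TLP), and NSI_NON_FATAL_MASK covers
 * bits 2, 5, 6, 9, 11, 12, 13, 15, 21, 23, 24, 25 and 29 - matching the
 * per-bit annotations in nsi_message[] below.
 */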

static char *nsi_message[30] = {
	"NSI Link Down",	/* NSI_FERR/NSI_NERR bit 0, fatal error */
	"",			/* reserved */
	"NSI Parity Error",	/* bit 2, non-fatal */
	"",			/* reserved */
	"",			/* reserved */
	"Correctable Error Message",	/* bit 5, non-fatal */
	"Non-Fatal Error Message",	/* bit 6, non-fatal */
	"Fatal Error Message",	/* bit 7, fatal */
	"",			/* reserved */
	"Receiver Error",	/* bit 9, non-fatal */
	"",			/* reserved */
	"Bad TLP",		/* bit 11, non-fatal */
	"Bad DLLP",		/* bit 12, non-fatal */
	"REPLAY_NUM Rollover",	/* bit 13, non-fatal */
	"",			/* reserved */
	"Replay Timer Timeout",	/* bit 15, non-fatal */
	"",			/* reserved */
	"",			/* reserved */
	"",			/* reserved */
	"Data Link Protocol Error",	/* bit 19, fatal */
	"",			/* reserved */
	"Poisoned TLP",		/* bit 21, non-fatal */
	"",			/* reserved */
	"Completion Timeout",	/* bit 23, non-fatal */
	"Completer Abort",	/* bit 24, non-fatal */
	"Unexpected Completion",	/* bit 25, non-fatal */
	"Receiver Overflow",	/* bit 26, fatal */
	"Malformed TLP",	/* bit 27, fatal */
	"",			/* reserved */
	"Unsupported Request"	/* bit 29, non-fatal */
};

static void do_nsi_error(int fatal, u32 errors)
{
	int i;

	for (i = 0; i < 30; i++) {
		if (errors & (1 << i))
			printk(KERN_WARNING "%sError %s\n",
			       fatal_message[fatal], nsi_message[i]);
	}
}

static inline void nsi_error(int fatal, u32 errors, int *error_found,
			     int handle_error)
{
	*error_found = 1;

	if (handle_error)
		do_nsi_error(fatal, errors);
}

static char *membuf_message[4] = {
	"Internal PMWB to DRAM parity",
	"Internal PMWB to System Bus Parity",
	"Internal System Bus or IO to PMWB Parity",
	"Internal DRAM to PMWB Parity"
};

static void do_membuf_error(u8 errors)
{
	int i;

	for (i = 0; i < 4; i++) {
		if (errors & (1 << i))
			e752x_printk(KERN_WARNING, "Non-Fatal Error %s\n",
				     membuf_message[i]);
	}
}

static inline void membuf_error(u8 errors, int *error_found, int handle_error)
{
	*error_found = 1;

	if (handle_error)
		do_membuf_error(errors);
}

static char *sysbus_message[10] = {
	"Addr or Request Parity",
	"Data Strobe Glitch",
	"Addr Strobe Glitch",
	"Data Parity",
	"Addr Above TOM",
	"Non DRAM Lock Error",
	"MCERR", "BINIT",
	"Memory Parity",
	"IO Subsystem Parity"
};

static void do_sysbus_error(int fatal, u32 errors)
{
	int i;

	for (i = 0; i < 10; i++) {
		if (errors & (1 << i))
			e752x_printk(KERN_WARNING, "%sError System Bus %s\n",
				     fatal_message[fatal], sysbus_message[i]);
	}
}

static inline void sysbus_error(int fatal, u32 errors, int *error_found,
				int handle_error)
{
	*error_found = 1;

	if (handle_error)
		do_sysbus_error(fatal, errors);
}

static void e752x_check_hub_interface(struct e752x_error_info *info,
				      int *error_found, int handle_error)
{
	u8 stat8;

	//pci_read_config_byte(dev,E752X_HI_FERR,&stat8);

	stat8 = info->hi_ferr;

	if (stat8 & 0x7f) {	/* Error, so process */
		stat8 &= 0x7f;

		if (stat8 & 0x2b)
			hub_error(1, stat8 & 0x2b, error_found, handle_error);

		if (stat8 & 0x54)
			hub_error(0, stat8 & 0x54, error_found, handle_error);
	}
	//pci_read_config_byte(dev,E752X_HI_NERR,&stat8);

	stat8 = info->hi_nerr;

	if (stat8 & 0x7f) {	/* Error, so process */
		stat8 &= 0x7f;

		if (stat8 & 0x2b)
			hub_error(1, stat8 & 0x2b, error_found, handle_error);

		if (stat8 & 0x54)
			hub_error(0, stat8 & 0x54, error_found, handle_error);
	}
}

static void e752x_check_ns_interface(struct e752x_error_info *info,
				     int *error_found, int handle_error)
{
	u32 stat32;

	stat32 = info->nsi_ferr;
	if (stat32 & NSI_ERR_MASK) { /* Error, so process */
		if (stat32 & NSI_FATAL_MASK)	/* check for fatal errors */
			nsi_error(1, stat32 & NSI_FATAL_MASK, error_found,
				  handle_error);
		if (stat32 & NSI_NON_FATAL_MASK) /* check for non-fatal ones */
			nsi_error(0, stat32 & NSI_NON_FATAL_MASK, error_found,
				  handle_error);
	}
	stat32 = info->nsi_nerr;
	if (stat32 & NSI_ERR_MASK) {
		if (stat32 & NSI_FATAL_MASK)
			nsi_error(1, stat32 & NSI_FATAL_MASK, error_found,
				  handle_error);
		if (stat32 & NSI_NON_FATAL_MASK)
			nsi_error(0, stat32 & NSI_NON_FATAL_MASK, error_found,
				  handle_error);
	}
}

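/*
 * The first/next system bus error registers are 16 bits each; below they
 * are packed into one 32-bit value (FERR in bits 9:0, NERR in bits 25:16).
 * Within each 10-bit field, mask 0x087 (bits 0-2 and 7: the parity/glitch
 * errors and BINIT) is treated as fatal and mask 0x378 (the remaining
 * bits) as non-fatal, matching sysbus_message[] above.
 */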
static void e752x_check_sysbus(struct e752x_error_info *info,
			       int *error_found, int handle_error)
{
	u32 stat32, error32;

	//pci_read_config_dword(dev,E752X_SYSBUS_FERR,&stat32);
	stat32 = info->sysbus_ferr + (info->sysbus_nerr << 16);

	if (stat32 == 0)
		return;		/* no errors */

	error32 = (stat32 >> 16) & 0x3ff;
	stat32 = stat32 & 0x3ff;

	if (stat32 & 0x087)
		sysbus_error(1, stat32 & 0x087, error_found, handle_error);

	if (stat32 & 0x378)
		sysbus_error(0, stat32 & 0x378, error_found, handle_error);

	if (error32 & 0x087)
		sysbus_error(1, error32 & 0x087, error_found, handle_error);

	if (error32 & 0x378)
		sysbus_error(0, error32 & 0x378, error_found, handle_error);
}

static void e752x_check_membuf(struct e752x_error_info *info,
			       int *error_found, int handle_error)
{
	u8 stat8;

	stat8 = info->buf_ferr;

	if (stat8 & 0x0f) {	/* Error, so process */
		stat8 &= 0x0f;
		membuf_error(stat8, error_found, handle_error);
	}

	stat8 = info->buf_nerr;

	if (stat8 & 0x0f) {	/* Error, so process */
		stat8 &= 0x0f;
		membuf_error(stat8, error_found, handle_error);
	}
}

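/*
 * The DRAM FERR/NERR registers are 16 bits wide and each error type is
 * checked in both bytes below (e.g. 0x0101 for a correctable error,
 * 0x0606 for an uncorrectable read/scrub error), so a flag set in either
 * byte of the first or next error register is reported.
 */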
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) static void e752x_check_dram(struct mem_ctl_info *mci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) struct e752x_error_info *info, int *error_found,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) int handle_error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) u16 error_one, error_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) error_one = info->dram_ferr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) error_next = info->dram_nerr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) /* decode and report errors */
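	/*
	 * The same flags appear in the low and high byte of the DRAM
	 * FERR/NERR registers: 0x01 = correctable, 0x06 = uncorrectable,
	 * 0x08 = CE threshold exceeded, 0x20 = DED retry,
	 * 0x40 = uncorrectable with no address information.
	 */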
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) if (error_one & 0x0101) /* check first error correctable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) process_ce(mci, error_one, info->dram_sec1_add,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) info->dram_sec1_syndrome, error_found, handle_error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) if (error_next & 0x0101) /* check next error correctable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) process_ce(mci, error_next, info->dram_sec2_add,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) info->dram_sec2_syndrome, error_found, handle_error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) if (error_one & 0x4040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) process_ue_no_info_wr(mci, error_found, handle_error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) if (error_next & 0x4040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) process_ue_no_info_wr(mci, error_found, handle_error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) if (error_one & 0x2020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) process_ded_retry(mci, error_one, info->dram_retr_add,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) error_found, handle_error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) if (error_next & 0x2020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) process_ded_retry(mci, error_next, info->dram_retr_add,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) error_found, handle_error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) if (error_one & 0x0808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) process_threshold_ce(mci, error_one, error_found, handle_error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) if (error_next & 0x0808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) process_threshold_ce(mci, error_next, error_found,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) handle_error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) if (error_one & 0x0606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) process_ue(mci, error_one, info->dram_ded_add,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) info->dram_scrb_add, error_found, handle_error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) if (error_next & 0x0606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) process_ue(mci, error_next, info->dram_ded_add,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) info->dram_scrb_add, error_found, handle_error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) static void e752x_get_error_info(struct mem_ctl_info *mci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) struct e752x_error_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) struct pci_dev *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) struct e752x_pvt *pvt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) memset(info, 0, sizeof(*info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) pvt = (struct e752x_pvt *)mci->pvt_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) dev = pvt->dev_d0f1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) pci_read_config_dword(dev, E752X_FERR_GLOBAL, &info->ferr_global);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) if (info->ferr_global) {
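		/*
		 * A global first error is flagged: snapshot the per-unit
		 * first-error registers, then write the captured values
		 * back, which clears the logged errors (the status bits
		 * are write-1-to-clear).
		 */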
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) pci_read_config_dword(dev, I3100_NSI_FERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) &info->nsi_ferr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) info->hi_ferr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) pci_read_config_byte(dev, E752X_HI_FERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) &info->hi_ferr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) info->nsi_ferr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) pci_read_config_word(dev, E752X_SYSBUS_FERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) &info->sysbus_ferr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) pci_read_config_byte(dev, E752X_BUF_FERR, &info->buf_ferr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) pci_read_config_word(dev, E752X_DRAM_FERR, &info->dram_ferr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) pci_read_config_dword(dev, E752X_DRAM_SEC1_ADD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) &info->dram_sec1_add);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) pci_read_config_word(dev, E752X_DRAM_SEC1_SYNDROME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) &info->dram_sec1_syndrome);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) pci_read_config_dword(dev, E752X_DRAM_DED_ADD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) &info->dram_ded_add);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) pci_read_config_dword(dev, E752X_DRAM_SCRB_ADD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) &info->dram_scrb_add);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) pci_read_config_dword(dev, E752X_DRAM_RETR_ADD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) &info->dram_retr_add);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) /* ignore the reserved bits just in case */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) if (info->hi_ferr & 0x7f)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) pci_write_config_byte(dev, E752X_HI_FERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) info->hi_ferr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) if (info->nsi_ferr & NSI_ERR_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) pci_write_config_dword(dev, I3100_NSI_FERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) info->nsi_ferr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) if (info->sysbus_ferr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) pci_write_config_word(dev, E752X_SYSBUS_FERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) info->sysbus_ferr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) if (info->buf_ferr & 0x0f)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) pci_write_config_byte(dev, E752X_BUF_FERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) info->buf_ferr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) if (info->dram_ferr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) pci_write_bits16(pvt->dev_d0f1, E752X_DRAM_FERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) info->dram_ferr, info->dram_ferr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) pci_write_config_dword(dev, E752X_FERR_GLOBAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) info->ferr_global);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) pci_read_config_dword(dev, E752X_NERR_GLOBAL, &info->nerr_global);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) if (info->nerr_global) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) pci_read_config_dword(dev, I3100_NSI_NERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) &info->nsi_nerr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) info->hi_nerr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) pci_read_config_byte(dev, E752X_HI_NERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) &info->hi_nerr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) info->nsi_nerr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) pci_read_config_word(dev, E752X_SYSBUS_NERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) &info->sysbus_nerr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) pci_read_config_byte(dev, E752X_BUF_NERR, &info->buf_nerr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) pci_read_config_word(dev, E752X_DRAM_NERR, &info->dram_nerr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) pci_read_config_dword(dev, E752X_DRAM_SEC2_ADD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) &info->dram_sec2_add);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) pci_read_config_word(dev, E752X_DRAM_SEC2_SYNDROME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) &info->dram_sec2_syndrome);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) if (info->hi_nerr & 0x7f)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) pci_write_config_byte(dev, E752X_HI_NERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) info->hi_nerr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) if (info->nsi_nerr & NSI_ERR_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) pci_write_config_dword(dev, I3100_NSI_NERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) info->nsi_nerr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) if (info->sysbus_nerr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) pci_write_config_word(dev, E752X_SYSBUS_NERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) info->sysbus_nerr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) if (info->buf_nerr & 0x0f)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) pci_write_config_byte(dev, E752X_BUF_NERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) info->buf_nerr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) if (info->dram_nerr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) pci_write_bits16(pvt->dev_d0f1, E752X_DRAM_NERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) info->dram_nerr, info->dram_nerr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) pci_write_config_dword(dev, E752X_NERR_GLOBAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) info->nerr_global);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) static int e752x_process_error_info(struct mem_ctl_info *mci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) struct e752x_error_info *info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) int handle_errors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) u32 error32, stat32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) int error_found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) error_found = 0;
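	/*
	 * In FERR_GLOBAL/NERR_GLOBAL, bits 27:18 flag fatal error sources and
	 * bits 14:4 flag non-fatal ones; they are fed to global_error() below.
	 */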
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) error32 = (info->ferr_global >> 18) & 0x3ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) stat32 = (info->ferr_global >> 4) & 0x7ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) if (error32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) global_error(1, error32, &error_found, handle_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) if (stat32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) global_error(0, stat32, &error_found, handle_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) error32 = (info->nerr_global >> 18) & 0x3ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) stat32 = (info->nerr_global >> 4) & 0x7ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) if (error32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) global_error(1, error32, &error_found, handle_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) if (stat32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) global_error(0, stat32, &error_found, handle_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) e752x_check_hub_interface(info, &error_found, handle_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) e752x_check_ns_interface(info, &error_found, handle_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) e752x_check_sysbus(info, &error_found, handle_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) e752x_check_membuf(info, &error_found, handle_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) e752x_check_dram(mci, info, &error_found, handle_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) return error_found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) static void e752x_check(struct mem_ctl_info *mci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) struct e752x_error_info info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) edac_dbg(3, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) e752x_get_error_info(mci, &info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) e752x_process_error_info(mci, &info, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) /* Program byte/sec bandwidth scrub rate to hardware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) static int set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 new_bw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) const struct scrubrate *scrubrates;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) struct pci_dev *pdev = pvt->dev_d0f0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) if (pvt->dev_info->ctl_dev == PCI_DEVICE_ID_INTEL_3100_0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) scrubrates = scrubrates_i3100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) scrubrates = scrubrates_e752x;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
	/* Translate the desired scrub rate to an e752x/3100 register value.
	 * Search for the bandwidth that is equal to or greater than the
	 * desired rate and program the corresponding register value.
	 */
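	/*
	 * Note: this lookup assumes scrubrates[] is sorted by ascending
	 * bandwidth and terminated with an SDRATE_EOT entry.
	 */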
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) for (i = 0; scrubrates[i].bandwidth != SDRATE_EOT; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) if (scrubrates[i].bandwidth >= new_bw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) if (scrubrates[i].bandwidth == SDRATE_EOT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) pci_write_config_word(pdev, E752X_MCHSCRB, scrubrates[i].scrubval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) return scrubrates[i].bandwidth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) /* Convert current scrub rate value into byte/sec bandwidth */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) static int get_sdram_scrub_rate(struct mem_ctl_info *mci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) const struct scrubrate *scrubrates;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) struct pci_dev *pdev = pvt->dev_d0f0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) u16 scrubval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) if (pvt->dev_info->ctl_dev == PCI_DEVICE_ID_INTEL_3100_0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) scrubrates = scrubrates_i3100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) scrubrates = scrubrates_e752x;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) /* Find the bandwidth matching the memory scrubber configuration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) pci_read_config_word(pdev, E752X_MCHSCRB, &scrubval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) scrubval = scrubval & 0x0f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) for (i = 0; scrubrates[i].bandwidth != SDRATE_EOT; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) if (scrubrates[i].scrubval == scrubval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) if (scrubrates[i].bandwidth == SDRATE_EOT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) e752x_printk(KERN_WARNING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) "Invalid sdram scrub control value: 0x%x\n", scrubval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) return scrubrates[i].bandwidth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) /* Return 1 if dual channel mode is active. Else return 0. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) static inline int dual_channel_active(u16 ddrcsr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) {
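	/* DDRCSR bits [13:12] are both set when running in dual-channel mode */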
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) return (((ddrcsr >> 12) & 3) == 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
/* Remap csrow index numbers if map_type is "reverse":
 * in reverse mode csrow N maps to csrow 7 - N.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) static inline int remap_csrow_index(struct mem_ctl_info *mci, int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) struct e752x_pvt *pvt = mci->pvt_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) if (!pvt->map_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) return (7 - index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) return (index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) u16 ddrcsr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) struct csrow_info *csrow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) enum edac_type edac_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) unsigned long last_cumul_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) int index, mem_dev, drc_chan;
	int drc_drbg;		/* DRB granularity: 1=64MB, 2=128MB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) int drc_ddim; /* DRAM Data Integrity Mode 0=none, 2=edac */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) u8 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) u32 dra, drc, cumul_size, i, nr_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) dra = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) for (index = 0; index < 4; index++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) u8 dra_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) pci_read_config_byte(pdev, E752X_DRA + index, &dra_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) dra |= dra_reg << (index * 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) pci_read_config_dword(pdev, E752X_DRC, &drc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) drc_chan = dual_channel_active(ddrcsr) ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) drc_drbg = drc_chan + 1; /* 128 in dual mode, 64 in single */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) drc_ddim = (drc >> 20) & 0x3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
	/* The dram row boundary (DRB) reg values are the boundary addresses
	 * for each DRAM row with a granularity of 64 or 128MB (single/dual
	 * channel operation). DRB regs are cumulative; therefore DRB7 will
	 * contain the total memory contained in all eight rows.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) /* mem_dev 0=x8, 1=x4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) mem_dev = (dra >> (index * 4 + 2)) & 0x3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) csrow = mci->csrows[remap_csrow_index(mci, index)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) mem_dev = (mem_dev == 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) pci_read_config_byte(pdev, E752X_DRB + index, &value);
		/* convert a 64 or 128 MiB DRB boundary to a page count */
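		/* e.g. with 4 KiB pages (PAGE_SHIFT == 12) and single-channel
		 * operation (drc_drbg == 1), each DRB unit is
		 * 1 << (25 + 1 - 12) = 16384 pages = 64 MiB.
		 */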
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) edac_dbg(3, "(%d) cumul_size 0x%x\n", index, cumul_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) if (cumul_size == last_cumul_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) continue; /* not populated */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) csrow->first_page = last_cumul_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) csrow->last_page = cumul_size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) nr_pages = cumul_size - last_cumul_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) last_cumul_size = cumul_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) * if single channel or x8 devices then SECDED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) * if dual channel and x4 then S4ECD4ED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) if (drc_ddim) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) if (drc_chan && mem_dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) edac_mode = EDAC_S4ECD4ED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) edac_mode = EDAC_SECDED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) mci->edac_cap |= EDAC_FLAG_SECDED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) edac_mode = EDAC_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) for (i = 0; i < csrow->nr_channels; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) struct dimm_info *dimm = csrow->channels[i]->dimm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) edac_dbg(3, "Initializing rank at (%i,%i)\n", index, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) dimm->nr_pages = nr_pages / csrow->nr_channels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) dimm->grain = 1 << 12; /* 4KiB - resolution of CELOG */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) dimm->mtype = MEM_RDDR; /* only one type supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) dimm->dtype = mem_dev ? DEV_X4 : DEV_X8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) dimm->edac_mode = edac_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) static void e752x_init_mem_map_table(struct pci_dev *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) struct e752x_pvt *pvt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) u8 value, last, row;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) last = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) row = 0;
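	/*
	 * DRB values are cumulative row boundaries, so a value equal to the
	 * previous one means the slot holds no memory.  pvt->map[] records
	 * the csrow number for each slot, or 0xff for an empty slot.
	 */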
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) for (index = 0; index < 8; index += 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) pci_read_config_byte(pdev, E752X_DRB + index, &value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) /* test if there is a dimm in this slot */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) if (value == last) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) /* no dimm in the slot, so flag it as empty */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) pvt->map[index] = 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) pvt->map[index + 1] = 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) } else { /* there is a dimm in the slot */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) pvt->map[index] = row;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) row++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) last = value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) /* test the next value to see if the dimm is double
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) * sided
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) pci_read_config_byte(pdev, E752X_DRB + index + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) &value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
			/* If the next DRB value is unchanged the dimm is
			 * single sided, so flag the second slot as empty;
			 * otherwise it is double sided and we record the
			 * next row number.
			 */
			pvt->map[index + 1] = (value == last) ? 0xff : row;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) row++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) last = value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) /* Return 0 on success or 1 on failure. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) static int e752x_get_devs(struct pci_dev *pdev, int dev_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) struct e752x_pvt *pvt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) pvt->dev_d0f1 = pci_get_device(PCI_VENDOR_ID_INTEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) pvt->dev_info->err_dev, NULL);
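	/*
	 * The error reporting function may be hidden by the BIOS; if the PCI
	 * core does not know about it, try scanning devfn 0:1 directly.
	 */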
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) if (pvt->dev_d0f1 == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) pvt->dev_d0f1 = pci_scan_single_device(pdev->bus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) PCI_DEVFN(0, 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) pci_dev_get(pvt->dev_d0f1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) if (pvt->dev_d0f1 == NULL) {
		e752x_printk(KERN_ERR, "error reporting device not found: "
			     "vendor 0x%x device 0x%x (broken BIOS?)\n",
			     PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) pvt->dev_d0f0 = pci_get_device(PCI_VENDOR_ID_INTEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) e752x_devs[dev_idx].ctl_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) if (pvt->dev_d0f0 == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) pci_dev_put(pvt->dev_d0f1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) /* Setup system bus parity mask register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) * Sysbus parity supported on:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) * e7320/e7520/e7525 + Xeon
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) static void e752x_init_sysbus_parity_mask(struct e752x_pvt *pvt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) char *cpu_id = cpu_data(0).x86_model_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) struct pci_dev *dev = pvt->dev_d0f1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) int enable = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) /* Allow module parameter override, else see if CPU supports parity */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) if (sysbus_parity != -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) enable = sysbus_parity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) } else if (cpu_id[0] && !strstr(cpu_id, "Xeon")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) e752x_printk(KERN_INFO, "System Bus Parity not "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) "supported by CPU, disabling\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) enable = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) if (enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x0000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x0309);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) static void e752x_init_error_reporting_regs(struct e752x_pvt *pvt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) struct pci_dev *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) dev = pvt->dev_d0f1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) /* Turn off error disable & SMI in case the BIOS turned it on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) pci_write_config_dword(dev, I3100_NSI_EMASK, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) pci_write_config_dword(dev, I3100_NSI_SMICMD, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) pci_write_config_byte(dev, E752X_HI_ERRMASK, 0x00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) pci_write_config_byte(dev, E752X_HI_SMICMD, 0x00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) e752x_init_sysbus_parity_mask(pvt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) pci_write_config_word(dev, E752X_SYSBUS_SMICMD, 0x00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) pci_write_config_byte(dev, E752X_BUF_ERRMASK, 0x00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) pci_write_config_byte(dev, E752X_BUF_SMICMD, 0x00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) pci_write_config_byte(dev, E752X_DRAM_ERRMASK, 0x00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) pci_write_config_byte(dev, E752X_DRAM_SMICMD, 0x00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) u16 pci_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) u8 stat8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) struct mem_ctl_info *mci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) struct edac_mc_layer layers[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) struct e752x_pvt *pvt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) u16 ddrcsr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) int drc_chan; /* Number of channels 0=1chan,1=2chan */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) struct e752x_error_info discard;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) edac_dbg(0, "mci\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) edac_dbg(0, "Starting Probe1\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)
	/*
	 * Check to see if device 0 function 1 is enabled; if it isn't, we
	 * assume the BIOS has reserved it for a reason and is expecting
	 * exclusive access. We take care not to violate that assumption and
	 * fail the probe.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) pci_read_config_byte(pdev, E752X_DEVPRES1, &stat8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) if (!force_function_unhide && !(stat8 & (1 << 5))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) printk(KERN_INFO "Contact your BIOS vendor to see if the "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) "E752x error registers can be safely un-hidden\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) }
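	/* un-hide device 0 function 1 so its error registers are accessible */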
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) stat8 |= (1 << 5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) pci_write_config_byte(pdev, E752X_DEVPRES1, stat8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) pci_read_config_word(pdev, E752X_DDRCSR, &ddrcsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) /* FIXME: should check >>12 or 0xf, true for all? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) /* Dual channel = 1, Single channel = 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) drc_chan = dual_channel_active(ddrcsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) layers[0].size = E752X_NR_CSROWS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) layers[0].is_virt_csrow = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) layers[1].type = EDAC_MC_LAYER_CHANNEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) layers[1].size = drc_chan + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) layers[1].is_virt_csrow = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) if (mci == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) edac_dbg(3, "init mci\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) mci->mtype_cap = MEM_FLAG_RDDR;
	/* 3100 IMCH supports SECDED only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) mci->edac_ctl_cap = (dev_idx == I3100) ? EDAC_FLAG_SECDED :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) (EDAC_FLAG_NONE | EDAC_FLAG_SECDED | EDAC_FLAG_S4ECD4ED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) /* FIXME - what if different memory types are in different csrows? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) mci->mod_name = EDAC_MOD_STR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) mci->pdev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) edac_dbg(3, "init pvt\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) pvt = (struct e752x_pvt *)mci->pvt_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) pvt->dev_info = &e752x_devs[dev_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) pvt->mc_symmetric = ((ddrcsr & 0x10) != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) if (e752x_get_devs(pdev, dev_idx, pvt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) edac_mc_free(mci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) edac_dbg(3, "more mci init\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) mci->ctl_name = pvt->dev_info->ctl_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) mci->dev_name = pci_name(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) mci->edac_check = e752x_check;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) mci->ctl_page_to_phys = ctl_page_to_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) mci->set_sdram_scrub_rate = set_sdram_scrub_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) mci->get_sdram_scrub_rate = get_sdram_scrub_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) /* set the map type. 1 = normal, 0 = reversed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) * Must be set before e752x_init_csrows in case csrow mapping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) * is reversed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) pci_read_config_byte(pdev, E752X_DRM, &stat8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) pvt->map_type = ((stat8 & 0x0f) > ((stat8 >> 4) & 0x0f));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) e752x_init_csrows(mci, pdev, ddrcsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) e752x_init_mem_map_table(pdev, pvt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) if (dev_idx == I3100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) mci->edac_cap = EDAC_FLAG_SECDED; /* the only mode supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) mci->edac_cap |= EDAC_FLAG_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) edac_dbg(3, "tolm, remapbase, remaplimit\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) /* load the top of low memory, remap base, and remap limit vars */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) pci_read_config_word(pdev, E752X_TOLM, &pci_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) pvt->tolm = ((u32) pci_data) << 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) pci_read_config_word(pdev, E752X_REMAPBASE, &pci_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) pvt->remapbase = ((u32) pci_data) << 14;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) pci_read_config_word(pdev, E752X_REMAPLIMIT, &pci_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) pvt->remaplimit = ((u32) pci_data) << 14;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) e752x_printk(KERN_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) "tolm = %x, remapbase = %x, remaplimit = %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) pvt->tolm, pvt->remapbase, pvt->remaplimit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) /* Here we assume that we will never see multiple instances of this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) * type of memory controller. The ID is therefore hardcoded to 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) if (edac_mc_add_mc(mci)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) edac_dbg(3, "failed edac_mc_add_mc()\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) e752x_init_error_reporting_regs(pvt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) e752x_get_error_info(mci, &discard); /* clear other MCH errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) /* allocating generic PCI control info */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) e752x_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) if (!e752x_pci) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) printk(KERN_WARNING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) "%s(): Unable to create PCI control\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) printk(KERN_WARNING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) "%s(): PCI error report via EDAC not setup\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) /* get this far and it's successful */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) edac_dbg(3, "success\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) pci_dev_put(pvt->dev_d0f0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) pci_dev_put(pvt->dev_d0f1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) edac_mc_free(mci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)
/* returns 0 on success, or negative on error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) static int e752x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) edac_dbg(0, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) /* wake up and enable device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) if (pci_enable_device(pdev) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) return e752x_probe1(pdev, ent->driver_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) static void e752x_remove_one(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) struct mem_ctl_info *mci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) struct e752x_pvt *pvt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) edac_dbg(0, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) if (e752x_pci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) edac_pci_release_generic_ctl(e752x_pci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) pvt = (struct e752x_pvt *)mci->pvt_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) pci_dev_put(pvt->dev_d0f0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) pci_dev_put(pvt->dev_d0f1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) edac_mc_free(mci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) static const struct pci_device_id e752x_pci_tbl[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) E7520},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) PCI_VEND_DEV(INTEL, 7525_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) E7525},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) PCI_VEND_DEV(INTEL, 7320_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) E7320},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) PCI_VEND_DEV(INTEL, 3100_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) I3100},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) } /* 0 terminated list. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) MODULE_DEVICE_TABLE(pci, e752x_pci_tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) static struct pci_driver e752x_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) .name = EDAC_MOD_STR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) .probe = e752x_init_one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) .remove = e752x_remove_one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) .id_table = e752x_pci_tbl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) static int __init e752x_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) int pci_rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) edac_dbg(3, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) /* Ensure that the OPSTATE is set correctly for POLL or NMI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) opstate_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) pci_rc = pci_register_driver(&e752x_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) return (pci_rc < 0) ? pci_rc : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) static void __exit e752x_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) edac_dbg(3, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) pci_unregister_driver(&e752x_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) module_init(e752x_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) module_exit(e752x_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) MODULE_LICENSE("GPL");
MODULE_AUTHOR("Linux Networx (http://lnxi.com) Tom Zimmerman");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) MODULE_DESCRIPTION("MC support for Intel e752x/3100 memory controllers");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) module_param(force_function_unhide, int, 0444);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) MODULE_PARM_DESC(force_function_unhide, "if BIOS sets Dev0:Fun1 up as hidden:"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) " 1=force unhide and hope BIOS doesn't fight driver for "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) "Dev0:Fun1 access");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) module_param(edac_op_state, int, 0444);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) module_param(sysbus_parity, int, 0444);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) MODULE_PARM_DESC(sysbus_parity, "0=disable system bus parity checking,"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) " 1=enable system bus parity checking, default=auto-detect");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) module_param(report_non_memory_errors, int, 0644);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) MODULE_PARM_DESC(report_non_memory_errors, "0=disable non-memory error "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) "reporting, 1=enable non-memory error reporting");