Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-only
#include "amd64_edac.h"
#include <asm/amd_nb.h>

static struct edac_pci_ctl_info *pci_ctl;

/*
 * Set by command line parameter. If BIOS has enabled the ECC, this override is
 * cleared to prevent re-enabling the hardware by this driver.
 */
static int ecc_enable_override;
module_param(ecc_enable_override, int, 0644);

static struct msr __percpu *msrs;

static struct amd64_family_type *fam_type;

/* Per-node stuff */
static struct ecc_settings **ecc_stngs;

/* Device for the PCI component */
static struct device *pci_ctl_dev;

/*
 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
 * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
 * or higher value'.
 *
 * FIXME: Produce a better mapping/linearisation.
 */
static const struct scrubrate {
	u32 scrubval;		/* bit pattern for scrub rate */
	u32 bandwidth;		/* bandwidth consumed (bytes/sec) */
} scrubrates[] = {
	{ 0x01, 1600000000UL},
	{ 0x02, 800000000UL},
	{ 0x03, 400000000UL},
	{ 0x04, 200000000UL},
	{ 0x05, 100000000UL},
	{ 0x06, 50000000UL},
	{ 0x07, 25000000UL},
	{ 0x08, 12284069UL},
	{ 0x09, 6274509UL},
	{ 0x0A, 3121951UL},
	{ 0x0B, 1560975UL},
	{ 0x0C, 781440UL},
	{ 0x0D, 390720UL},
	{ 0x0E, 195300UL},
	{ 0x0F, 97650UL},
	{ 0x10, 48854UL},
	{ 0x11, 24427UL},
	{ 0x12, 12213UL},
	{ 0x13, 6101UL},
	{ 0x14, 3051UL},
	{ 0x15, 1523UL},
	{ 0x16, 761UL},
	{ 0x00, 0UL},		/* scrubbing off */
};
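
/*
 * Worked example (illustrative only, not part of the driver): the 'set'
 * walk in __set_scrub_rate() below scans this table top-down and stops at
 * the first entry whose bandwidth is <= the requested value, e.g. for a
 * requested 500 MB/s:
 *
 *	new_bw = 500000000;
 *	// 1600000000 > new_bw, 800000000 > new_bw, 400000000 <= new_bw
 *	// selected entry: { 0x03, 400000000UL }, i.e. scrubval 0x03
 */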

int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
			       u32 *val, const char *func)
{
	int err = 0;

	err = pci_read_config_dword(pdev, offset, val);
	if (err)
		amd64_warn("%s: error reading F%dx%03x.\n",
			   func, PCI_FUNC(pdev->devfn), offset);

	return err;
}

int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
				u32 val, const char *func)
{
	int err = 0;

	err = pci_write_config_dword(pdev, offset, val);
	if (err)
		amd64_warn("%s: error writing to F%dx%03x.\n",
			   func, PCI_FUNC(pdev->devfn), offset);

	return err;
}
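
/*
 * Usage sketch (illustrative; assumes the amd64_read_pci_cfg() /
 * amd64_write_pci_cfg() wrapper macros from amd64_edac.h, which supply
 * __func__ for the error message):
 *
 *	u32 val;
 *
 *	if (amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &val))
 *		return;		// read failed, warning already logged
 */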

/*
 * Select DCT to which PCI cfg accesses are routed
 */
static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
{
	u32 reg = 0;

	amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
	reg &= (pvt->model == 0x30) ? ~3 : ~1;
	reg |= dct;
	amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
}

/*
 * Depending on the family, F2 DCT reads need special handling:
 *
 * K8: has a single DCT only and no address offsets >= 0x100
 *
 * F10h: each DCT has its own set of regs
 *	DCT0 -> F2x040..
 *	DCT1 -> F2x140..
 *
 * F16h: has only 1 DCT
 *
 * F15h: we select which DCT we access using F1x10C[DctCfgSel]
 */
static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct,
					 int offset, u32 *val)
{
	switch (pvt->fam) {
	case 0xf:
		if (dct || offset >= 0x100)
			return -EINVAL;
		break;

	case 0x10:
		if (dct) {
			/*
			 * Note: If ganging is enabled, barring the regs
			 * F2x[1,0]98 and F2x[1,0]9C; reads to F2x1xx
			 * return 0. (cf. Section 2.8.1 F10h BKDG)
			 */
			if (dct_ganging_enabled(pvt))
				return 0;

			offset += 0x100;
		}
		break;

	case 0x15:
		/*
		 * F15h: F2x1xx addresses do not map explicitly to DCT1.
		 * We should select which DCT we access using F1x10C[DctCfgSel].
		 */
		dct = (dct && pvt->model == 0x30) ? 3 : dct;
		f15h_select_dct(pvt, dct);
		break;

	case 0x16:
		if (dct)
			return -EINVAL;
		break;

	default:
		break;
	}
	return amd64_read_pci_cfg(pvt->F2, offset, val);
}
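
/*
 * Example (illustrative): on an F10h system, reading the DCT1 copy of a
 * register goes through the offset fixup above, so
 *
 *	amd64_read_dct_pci_cfg(pvt, 1, 0x40, &val);
 *
 * actually reads F2x140 (unless the DCTs are ganged, in which case it
 * just returns 0 without touching the hardware).
 */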

/*
 * Memory scrubber control interface. For K8, memory scrubbing is handled by
 * hardware and can involve L2 cache, dcache as well as the main memory. With
 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
 * functionality.
 *
 * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
 * (DRAM) to cache lines. This is nasty, so we will use bandwidth in
 * bytes/sec for the setting.
 *
 * Currently, we only do DRAM scrubbing. If the scrubbing is done in software
 * on other archs, we might not have access to the caches directly.
 */

static inline void __f17h_set_scrubval(struct amd64_pvt *pvt, u32 scrubval)
{
	/*
	 * Fam17h supports scrub values between 0x5 and 0x14. Also, the values
	 * are shifted down by 0x5, so scrubval 0x5 is written to the register
	 * as 0x0, scrubval 0x6 as 0x1, etc.
	 */
	if (scrubval >= 0x5 && scrubval <= 0x14) {
		scrubval -= 0x5;
		pci_write_bits32(pvt->F6, F17H_SCR_LIMIT_ADDR, scrubval, 0xF);
		pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 1, 0x1);
	} else {
		pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 0, 0x1);
	}
}
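
/*
 * Worked example (illustrative): scrubval 0x8 lies in the supported
 * 0x5-0x14 window, so 0x8 - 0x5 = 0x3 is written to F17H_SCR_LIMIT_ADDR
 * and the enable bit in F17H_SCR_BASE_ADDR is set. An out-of-range value
 * (e.g. 0x0, "scrubbing off") only clears the enable bit.
 */
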
/*
 * Scan the scrub rate mapping table for a close or matching bandwidth value to
 * issue. If the requested rate is too big, then use the last maximum value found.
 */
static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
{
	u32 scrubval;
	int i;

	/*
	 * Map the configured rate (new_bw) to a value specific to the AMD64
	 * memory controller and apply to register. Search for the first
	 * bandwidth entry that is greater or equal to the setting requested
	 * and program that. If no suitable bandwidth is found, we end up at
	 * the last element in scrubrates[], which turns DRAM scrubbing off
	 * entirely.
	 */
	for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
		/*
		 * skip scrub rates which aren't recommended
		 * (see F10 BKDG, F3x58)
		 */
		if (scrubrates[i].scrubval < min_rate)
			continue;

		if (scrubrates[i].bandwidth <= new_bw)
			break;
	}

	scrubval = scrubrates[i].scrubval;

	if (pvt->umc) {
		__f17h_set_scrubval(pvt, scrubval);
	} else if (pvt->fam == 0x15 && pvt->model == 0x60) {
		f15h_select_dct(pvt, 0);
		pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
		f15h_select_dct(pvt, 1);
		pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
	} else {
		pci_write_bits32(pvt->F3, SCRCTRL, scrubval, 0x001F);
	}

	if (scrubval)
		return scrubrates[i].bandwidth;

	return 0;
}
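
/*
 * Illustrative min_rate effect: with min_rate = 0x5 (the default used by
 * set_scrub_rate() below), table entries 0x01-0x04 are skipped even when
 * their bandwidth would match, so the fastest programmable rate becomes
 * scrubval 0x05 (100000000 bytes/sec).
 */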

static int set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 min_scrubrate = 0x5;

	if (pvt->fam == 0xf)
		min_scrubrate = 0x0;

	if (pvt->fam == 0x15) {
		/* Erratum #505 */
		if (pvt->model < 0x10)
			f15h_select_dct(pvt, 0);

		if (pvt->model == 0x60)
			min_scrubrate = 0x6;
	}
	return __set_scrub_rate(pvt, bw, min_scrubrate);
}

static int get_scrub_rate(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	int i, retval = -EINVAL;
	u32 scrubval = 0;

	if (pvt->umc) {
		amd64_read_pci_cfg(pvt->F6, F17H_SCR_BASE_ADDR, &scrubval);
		if (scrubval & BIT(0)) {
			amd64_read_pci_cfg(pvt->F6, F17H_SCR_LIMIT_ADDR, &scrubval);
			scrubval &= 0xF;
			scrubval += 0x5;
		} else {
			scrubval = 0;
		}
	} else if (pvt->fam == 0x15) {
		/* Erratum #505 */
		if (pvt->model < 0x10)
			f15h_select_dct(pvt, 0);

		if (pvt->model == 0x60)
			amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
		else
			amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
	} else {
		amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
	}

	scrubval = scrubval & 0x001F;

	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
		if (scrubrates[i].scrubval == scrubval) {
			retval = scrubrates[i].bandwidth;
			break;
		}
	}
	return retval;
}
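
/*
 * Roundtrip sketch (illustrative): on a Fam17h part, get_scrub_rate()
 * undoes the 0x5 shift applied by __f17h_set_scrubval(). A limit register
 * reading of 0x3 with the enable bit set becomes scrubval 0x3 + 0x5 = 0x8,
 * which the table lookup maps back to 12284069 bytes/sec.
 */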

/*
 * returns true if the SysAddr given by sys_addr matches the
 * DRAM base/limit associated with node_id
 */
static bool base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, u8 nid)
{
	u64 addr;

	/* The K8 treats this as a 40-bit value.  However, bits 63-40 will be
	 * all ones if the most significant implemented address bit is 1.
	 * Here we discard bits 63-40.  See section 3.4.2 of AMD publication
	 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
	 * Application Programming.
	 */
	addr = sys_addr & 0x000000ffffffffffull;

	return ((addr >= get_dram_base(pvt, nid)) &&
		(addr <= get_dram_limit(pvt, nid)));
}
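
/*
 * Illustrative truncation: a sign-extended SysAddr such as
 * 0xffffff8000000000 is reduced to its 40 implemented bits:
 *
 *	0xffffff8000000000 & 0x000000ffffffffff == 0x8000000000
 *
 * and only that 40-bit value is compared against the node's base/limit.
 */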

/*
 * Attempt to map a SysAddr to a node. On success, return a pointer to the
 * mem_ctl_info structure for the node that the SysAddr maps to.
 *
 * On failure, return NULL.
 */
static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
						u64 sys_addr)
{
	struct amd64_pvt *pvt;
	u8 node_id;
	u32 intlv_en, bits;

	/*
	 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
	 * 3.4.4.2) registers to map the SysAddr to a node ID.
	 */
	pvt = mci->pvt_info;

	/*
	 * The value of this field should be the same for all DRAM Base
	 * registers.  Therefore we arbitrarily choose to read it from the
	 * register for node 0.
	 */
	intlv_en = dram_intlv_en(pvt, 0);

	if (intlv_en == 0) {
		for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
			if (base_limit_match(pvt, sys_addr, node_id))
				goto found;
		}
		goto err_no_match;
	}

	if (unlikely((intlv_en != 0x01) &&
		     (intlv_en != 0x03) &&
		     (intlv_en != 0x07))) {
		amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
		return NULL;
	}

	bits = (((u32) sys_addr) >> 12) & intlv_en;

	for (node_id = 0; ; ) {
		if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
			break;	/* intlv_sel field matches */

		if (++node_id >= DRAM_RANGES)
			goto err_no_match;
	}

	/* sanity test for sys_addr */
	if (unlikely(!base_limit_match(pvt, sys_addr, node_id))) {
		amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address range for node %d with node interleaving enabled.\n",
			   __func__, sys_addr, node_id);
		return NULL;
	}

found:
	return edac_mc_find((int)node_id);

err_no_match:
	edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n",
		 (unsigned long)sys_addr);

	return NULL;
}
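
/*
 * Interleave selection example (illustrative): with intlv_en == 0x03, two
 * SysAddr bits pick among four nodes:
 *
 *	bits = (sys_addr >> 12) & 0x03;
 *
 * so sys_addr 0x3000 yields bits == 0x3 and matches the node whose
 * IntlvSel field equals 3.
 */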

/*
 * compute the CS base address of the @csrow on the DRAM controller @dct.
 * For details see F2x[5C:40] in the processor's BKDG
 */
static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
				 u64 *base, u64 *mask)
{
	u64 csbase, csmask, base_bits, mask_bits;
	u8 addr_shift;

	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
		csbase		= pvt->csels[dct].csbases[csrow];
		csmask		= pvt->csels[dct].csmasks[csrow];
		base_bits	= GENMASK_ULL(31, 21) | GENMASK_ULL(15, 9);
		mask_bits	= GENMASK_ULL(29, 21) | GENMASK_ULL(15, 9);
		addr_shift	= 4;

	/*
	 * F16h and F15h, models 30h and later need two addr_shift values:
	 * 8 for high and 6 for low (cf. F16h BKDG).
	 */
	} else if (pvt->fam == 0x16 ||
		  (pvt->fam == 0x15 && pvt->model >= 0x30)) {
		csbase		= pvt->csels[dct].csbases[csrow];
		csmask		= pvt->csels[dct].csmasks[csrow >> 1];

		*base  = (csbase & GENMASK_ULL(15,  5)) << 6;
		*base |= (csbase & GENMASK_ULL(30, 19)) << 8;

		*mask = ~0ULL;
		/* poke holes for the csmask */
		*mask &= ~((GENMASK_ULL(15, 5)  << 6) |
			   (GENMASK_ULL(30, 19) << 8));

		*mask |= (csmask & GENMASK_ULL(15, 5))  << 6;
		*mask |= (csmask & GENMASK_ULL(30, 19)) << 8;

		return;
	} else {
		csbase		= pvt->csels[dct].csbases[csrow];
		csmask		= pvt->csels[dct].csmasks[csrow >> 1];
		addr_shift	= 8;

		if (pvt->fam == 0x15)
			base_bits = mask_bits =
				GENMASK_ULL(30, 19) | GENMASK_ULL(13, 5);
		else
			base_bits = mask_bits =
				GENMASK_ULL(28, 19) | GENMASK_ULL(13, 5);
	}

	*base  = (csbase & base_bits) << addr_shift;

	*mask  = ~0ULL;
	/* poke holes for the csmask */
	*mask &= ~(mask_bits << addr_shift);
	/* OR them in */
	*mask |= (csmask & mask_bits) << addr_shift;
}
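
/*
 * Consumption sketch (illustrative): callers treat the returned @mask as
 * "bits excluded from comparison". input_addr_to_csrow() below inverts it
 * and checks
 *
 *	(input_addr & ~mask) == (base & ~mask)
 *
 * i.e. an address belongs to the csrow when it agrees with the base on
 * every address bit the chip-select mask does not mark as don't-care.
 */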

#define for_each_chip_select(i, dct, pvt) \
	for (i = 0; i < pvt->csels[dct].b_cnt; i++)

#define chip_select_base(i, dct, pvt) \
	pvt->csels[dct].csbases[i]

#define for_each_chip_select_mask(i, dct, pvt) \
	for (i = 0; i < pvt->csels[dct].m_cnt; i++)

#define for_each_umc(i) \
	for (i = 0; i < fam_type->max_mcs; i++)

/*
 * @input_addr is an InputAddr associated with the node given by mci. Return the
 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
 */
static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
{
	struct amd64_pvt *pvt;
	int csrow;
	u64 base, mask;

	pvt = mci->pvt_info;

	for_each_chip_select(csrow, 0, pvt) {
		if (!csrow_enabled(csrow, 0, pvt))
			continue;

		get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);

		mask = ~mask;

		if ((input_addr & mask) == (base & mask)) {
			edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n",
				 (unsigned long)input_addr, csrow,
				 pvt->mc_node_id);

			return csrow;
		}
	}
	edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n",
		 (unsigned long)input_addr, pvt->mc_node_id);

	return -1;
}

/*
 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
 * for the node represented by mci. Info is passed back in *hole_base,
 * *hole_offset, and *hole_size.  Function returns 0 if info is valid or 1 if
 * info is invalid. Info may be invalid for either of the following reasons:
 *
 * - The revision of the node is not E or greater.  In this case, the DRAM Hole
 *   Address Register does not exist.
 *
 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
 *   indicating that its contents are not valid.
 *
 * The values passed back in *hole_base, *hole_offset, and *hole_size are
 * complete 32-bit values despite the fact that the bitfields in the DHAR
 * only represent bits 31-24 of the base and offset values.
 */
int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
			     u64 *hole_offset, u64 *hole_size)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	/* only revE and later have the DRAM Hole Address Register */
	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_E) {
		edac_dbg(1, "  revision %d for node %d does not support DHAR\n",
			 pvt->ext_model, pvt->mc_node_id);
		return 1;
	}

	/* valid for Fam10h and above */
	if (pvt->fam >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this system\n");
		return 1;
	}

	if (!dhar_valid(pvt)) {
		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this node %d\n",
			 pvt->mc_node_id);
		return 1;
	}

	/* This node has Memory Hoisting */

	/* +------------------+--------------------+--------------------+-----
	 * | memory           | DRAM hole          | relocated          |
	 * | [0, (x - 1)]     | [x, 0xffffffff]    | addresses from     |
	 * |                  |                    | DRAM hole          |
	 * |                  |                    | [0x100000000,      |
	 * |                  |                    |  (0x100000000+     |
	 * |                  |                    |   (0xffffffff-x))] |
	 * +------------------+--------------------+--------------------+-----
	 *
	 * Above is a diagram of physical memory showing the DRAM hole and the
	 * relocated addresses from the DRAM hole.  As shown, the DRAM hole
	 * starts at address x (the base address) and extends through address
	 * 0xffffffff.  The DRAM Hole Address Register (DHAR) relocates the
	 * addresses in the hole so that they start at 0x100000000.
	 */

	*hole_base = dhar_base(pvt);
	*hole_size = (1ULL << 32) - *hole_base;

	*hole_offset = (pvt->fam > 0xf) ? f10_dhar_offset(pvt)
					: k8_dhar_offset(pvt);

	edac_dbg(1, "  DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
		 pvt->mc_node_id, (unsigned long)*hole_base,
		 (unsigned long)*hole_offset, (unsigned long)*hole_size);

	return 0;
}
EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
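
/*
 * Worked example (illustrative numbers): with a hole base of 0xc0000000,
 *
 *	*hole_base = 0xc0000000;
 *	*hole_size = (1ULL << 32) - 0xc0000000;	// 0x40000000, i.e. 1 GiB
 *
 * and the DRAM behind [0xc0000000, 0xffffffff] is re-addressed starting
 * at 0x100000000, as the diagram above describes.
 */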

/*
 * Return the DramAddr that the SysAddr given by @sys_addr maps to.  It is
 * assumed that sys_addr maps to the node given by mci.
 *
 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
 * then it is also involved in translating a SysAddr to a DramAddr. Sections
 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
 * These parts of the documentation are unclear. I interpret them as follows:
 *
 * When node n receives a SysAddr, it processes the SysAddr as follows:
 *
 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
 *    Limit registers for node n. If the SysAddr is not within the range
 *    specified by the base and limit values, then node n ignores the SysAddr
 *    (since it does not map to node n). Otherwise continue to step 2 below.
 *
 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
 *    disabled so skip to step 3 below. Otherwise see if the SysAddr is within
 *    the range of relocated addresses (starting at 0x100000000) from the DRAM
 *    hole. If not, skip to step 3 below. Else get the value of the
 *    DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
 *    offset defined by this value from the SysAddr.
 *
 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
 *    Base register for node n. To obtain the DramAddr, subtract the base
 *    address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
 */
static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
	int ret;

	dram_base = get_dram_base(pvt, pvt->mc_node_id);

	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
				      &hole_size);
	if (!ret) {
		if ((sys_addr >= (1ULL << 32)) &&
		    (sys_addr < ((1ULL << 32) + hole_size))) {
			/* use DHAR to translate SysAddr to DramAddr */
			dram_addr = sys_addr - hole_offset;

			edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
				 (unsigned long)sys_addr,
				 (unsigned long)dram_addr);

			return dram_addr;
		}
	}

	/*
	 * Translate the SysAddr to a DramAddr as shown near the start of
	 * section 3.4.4 (p. 70).  Although sys_addr is a 64-bit value, the k8
	 * only deals with 40-bit values.  Therefore we discard bits 63-40 of
	 * sys_addr below.  If bit 39 of sys_addr is 1 then the bits we
	 * discard are all 1s.  Otherwise the bits we discard are all 0s.  See
	 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
	 * Programmer's Manual Volume 1 Application Programming.
	 */
	dram_addr = (sys_addr & GENMASK_ULL(39, 0)) - dram_base;

	edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
		 (unsigned long)sys_addr, (unsigned long)dram_addr);
	return dram_addr;
}

/*
 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
 * (section 3.4.4.1).  Return the number of bits from a SysAddr that are used
 * for node interleaving.
 */
static int num_node_interleave_bits(unsigned intlv_en)
{
	static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
	int n;

	BUG_ON(intlv_en > 7);
	n = intlv_shift_table[intlv_en];
	return n;
}
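
/*
 * Illustrative mapping: only the contiguous low-bit encodings are valid,
 * matching the 2-, 4- and 8-node configurations:
 *
 *	intlv_en 0x1 -> 1 bit	(2-node interleave)
 *	intlv_en 0x3 -> 2 bits	(4-node interleave)
 *	intlv_en 0x7 -> 3 bits	(8-node interleave)
 */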

/* Translate the DramAddr given by @dram_addr to an InputAddr. */
static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
{
	struct amd64_pvt *pvt;
	int intlv_shift;
	u64 input_addr;

	pvt = mci->pvt_info;

	/*
	 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
	 * concerning translating a DramAddr to an InputAddr.
	 */
	intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
	input_addr = ((dram_addr >> intlv_shift) & GENMASK_ULL(35, 12)) +
		      (dram_addr & 0xfff);

	edac_dbg(2, "  Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
		 intlv_shift, (unsigned long)dram_addr,
		 (unsigned long)input_addr);

	return input_addr;
}
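
/*
 * Worked example (illustrative): with one interleave bit (intlv_shift == 1),
 * DramAddr 0x2345 becomes
 *
 *	((0x2345 >> 1) & GENMASK_ULL(35, 12)) + (0x2345 & 0xfff)
 *	  = 0x1000 + 0x345 = 0x1345
 *
 * i.e. the interleave bit is squeezed out above the low 12 address bits.
 */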

/*
 * Translate the SysAddr represented by @sys_addr to an InputAddr.  It is
 * assumed that @sys_addr maps to the node given by mci.
 */
static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
	u64 input_addr;

	input_addr =
	    dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));

	edac_dbg(2, "SysAddr 0x%lx translates to InputAddr 0x%lx\n",
		 (unsigned long)sys_addr, (unsigned long)input_addr);

	return input_addr;
}

/* Map the Error address to a PAGE and PAGE OFFSET. */
static inline void error_address_to_page_and_offset(u64 error_address,
						    struct err_info *err)
{
	err->page = (u32) (error_address >> PAGE_SHIFT);
	err->offset = ((u32) error_address) & ~PAGE_MASK;
}
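
/*
 * Example (illustrative, assuming 4 KiB pages, i.e. PAGE_SHIFT == 12):
 * error_address 0x12345678 splits into page 0x12345
 * (error_address >> PAGE_SHIFT) and offset 0x678
 * (error_address & ~PAGE_MASK).
 */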

/*
 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
 * of a node that detected an ECC memory error.  mci represents the node that
 * the error address maps to (possibly different from the node that detected
 * the error).  Return the number of the csrow that sys_addr maps to, or -1 on
 * error.
 */
static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
{
	int csrow;

	csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));

	if (csrow == -1)
		amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
				  "address 0x%lx\n", (unsigned long)sys_addr);
	return csrow;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715)  * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716)  * are ECC capable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) static unsigned long determine_edac_cap(struct amd64_pvt *pvt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 	unsigned long edac_cap = EDAC_FLAG_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 	u8 bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 	if (pvt->umc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 		u8 i, umc_en_mask = 0, dimm_ecc_en_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 		for_each_umc(i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 			if (!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 			umc_en_mask |= BIT(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 			/* UMC Configuration bit 12 (DimmEccEn) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 			if (pvt->umc[i].umc_cfg & BIT(12))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 				dimm_ecc_en_mask |= BIT(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 		if (umc_en_mask == dimm_ecc_en_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 			edac_cap = EDAC_FLAG_SECDED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 		bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 			? 19
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 			: 17;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 		if (pvt->dclr0 & BIT(bit))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 			edac_cap = EDAC_FLAG_SECDED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	return edac_cap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) }
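
/*
 * Worked example for the UMC path above (values are illustrative): with
 * UMC0 and UMC1 initialized (SdpInit set) and DimmEccEn (umc_cfg bit 12)
 * set on both, umc_en_mask == dimm_ecc_en_mask == 0x3 and SECDED is
 * reported. If only UMC0 had DimmEccEn set, the masks would differ
 * (0x3 vs 0x1) and the capability would stay EDAC_FLAG_NONE.
 */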
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) static void debug_display_dimm_sizes(struct amd64_pvt *, u8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 	edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 	if (pvt->dram_type == MEM_LRDDR3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 		u32 dcsm = pvt->csels[chan].csmasks[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 		 * It's assumed that all LRDIMMs in a DCT are of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 		 * same 'type' until proven otherwise. So, use a cs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 		 * value of '0' here to get the dcsm value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 		edac_dbg(1, " LRDIMM %dx rank multiply\n", (dcsm & 0x3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	edac_dbg(1, "All DIMMs support ECC:%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 		    (dclr & BIT(19)) ? "yes" : "no");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	edac_dbg(1, "  PAR/ERR parity: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 		 (dclr & BIT(8)) ?  "enabled" : "disabled");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	if (pvt->fam == 0x10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 		edac_dbg(1, "  DCT 128bit mode width: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 			 (dclr & BIT(11)) ?  "128b" : "64b");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	edac_dbg(1, "  x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 		 (dclr & BIT(12)) ?  "yes" : "no",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 		 (dclr & BIT(13)) ?  "yes" : "no",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 		 (dclr & BIT(14)) ?  "yes" : "no",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 		 (dclr & BIT(15)) ?  "yes" : "no");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) #define CS_EVEN_PRIMARY		BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) #define CS_ODD_PRIMARY		BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) #define CS_EVEN_SECONDARY	BIT(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) #define CS_ODD_SECONDARY	BIT(3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) #define CS_3R_INTERLEAVE	BIT(4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) #define CS_EVEN			(CS_EVEN_PRIMARY | CS_EVEN_SECONDARY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) #define CS_ODD			(CS_ODD_PRIMARY | CS_ODD_SECONDARY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) static int f17_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	u8 base, count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	int cs_mode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	if (csrow_enabled(2 * dimm, ctrl, pvt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 		cs_mode |= CS_EVEN_PRIMARY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	if (csrow_enabled(2 * dimm + 1, ctrl, pvt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 		cs_mode |= CS_ODD_PRIMARY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	/* Asymmetric dual-rank DIMM support. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	if (csrow_sec_enabled(2 * dimm + 1, ctrl, pvt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 		cs_mode |= CS_ODD_SECONDARY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	 * 3 Rank interleaving support.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	 * There should be only three bases enabled and their two masks should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	 * be equal.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	for_each_chip_select(base, ctrl, pvt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 		count += csrow_enabled(base, ctrl, pvt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	if (count == 3 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	    pvt->csels[ctrl].csmasks[0] == pvt->csels[ctrl].csmasks[1]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 		edac_dbg(1, "3R interleaving in use.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 		cs_mode |= CS_3R_INTERLEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	return cs_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) }
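
/*
 * Example encodings (illustrative): a symmetric dual-rank DIMM in slot 0
 * enables both primary chip selects, so f17_get_cs_mode() returns
 * CS_EVEN_PRIMARY | CS_ODD_PRIMARY (0x3); an asymmetric dual-rank DIMM
 * uses the odd secondary select instead and yields
 * CS_EVEN_PRIMARY | CS_ODD_SECONDARY (0x9).
 */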
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	int dimm, size0, size1, cs0, cs1, cs_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	for (dimm = 0; dimm < 2; dimm++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 		cs0 = dimm * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 		cs1 = dimm * 2 + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 		cs_mode = f17_get_cs_mode(dimm, ctrl, pvt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 		size0 = pvt->ops->dbam_to_cs(pvt, ctrl, cs_mode, cs0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 		size1 = pvt->ops->dbam_to_cs(pvt, ctrl, cs_mode, cs1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 				cs0,	size0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 				cs1,	size1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) static void __dump_misc_regs_df(struct amd64_pvt *pvt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	struct amd64_umc *umc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	u32 i, tmp, umc_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	for_each_umc(i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 		umc_base = get_umc_base(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 		umc = &pvt->umc[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 		edac_dbg(1, "UMC%d DIMM cfg: 0x%x\n", i, umc->dimm_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 		edac_dbg(1, "UMC%d UMC cfg: 0x%x\n", i, umc->umc_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 		edac_dbg(1, "UMC%d SDP ctrl: 0x%x\n", i, umc->sdp_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 		edac_dbg(1, "UMC%d ECC ctrl: 0x%x\n", i, umc->ecc_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 		amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ECC_BAD_SYMBOL, &tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 		edac_dbg(1, "UMC%d ECC bad symbol: 0x%x\n", i, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 		amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_UMC_CAP, &tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 		edac_dbg(1, "UMC%d UMC cap: 0x%x\n", i, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 		edac_dbg(1, "UMC%d UMC cap high: 0x%x\n", i, umc->umc_cap_hi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 		edac_dbg(1, "UMC%d ECC capable: %s, ChipKill ECC capable: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 				i, (umc->umc_cap_hi & BIT(30)) ? "yes" : "no",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 				    (umc->umc_cap_hi & BIT(31)) ? "yes" : "no");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 		edac_dbg(1, "UMC%d All DIMMs support ECC: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 				i, (umc->umc_cfg & BIT(12)) ? "yes" : "no");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 		edac_dbg(1, "UMC%d x4 DIMMs present: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 				i, (umc->dimm_cfg & BIT(6)) ? "yes" : "no");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 		edac_dbg(1, "UMC%d x16 DIMMs present: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 				i, (umc->dimm_cfg & BIT(7)) ? "yes" : "no");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 		if (pvt->dram_type == MEM_LRDDR4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 			amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ADDR_CFG, &tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 			edac_dbg(1, "UMC%d LRDIMM %dx rank multiply\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 					i, 1 << ((tmp >> 4) & 0x3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 		debug_display_dimm_sizes_df(pvt, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	edac_dbg(1, "F0x104 (DRAM Hole Address): 0x%08x, base: 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 		 pvt->dhar, dhar_base(pvt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) }
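
/*
 * Note on the LRDIMM decode above: the address-config register carries the
 * rank-multiply factor as a 2-bit log2 field in bits [5:4], so e.g. a raw
 * field value of 2 is printed as a "4x" rank multiply.
 */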
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) /* Display and decode various NB registers for debug purposes. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) static void __dump_misc_regs(struct amd64_pvt *pvt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	edac_dbg(1, "  NB two channel DRAM capable: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 		 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	edac_dbg(1, "  ECC capable: %s, ChipKill ECC capable: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 		 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 		 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	debug_dump_dramcfg_low(pvt, pvt->dclr0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 		 pvt->dhar, dhar_base(pvt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 		 (pvt->fam == 0xf) ? k8_dhar_offset(pvt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 				   : f10_dhar_offset(pvt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	debug_display_dimm_sizes(pvt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	/* everything below this point is Fam10h and above */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	if (pvt->fam == 0xf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	debug_display_dimm_sizes(pvt, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	/* Only if NOT ganged does dclr1 have valid info */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	if (!dct_ganging_enabled(pvt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 		debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) /* Display and decode various NB registers for debug purposes. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) static void dump_misc_regs(struct amd64_pvt *pvt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	if (pvt->umc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 		__dump_misc_regs_df(pvt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 		__dump_misc_regs(pvt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	edac_dbg(1, "  DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	amd64_info("using x%u syndromes.\n", pvt->ecc_sym_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939)  * See BKDG, F2x[1,0][5C:40], F2x[1,0][6C:60]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) static void prep_chip_selects(struct amd64_pvt *pvt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	} else if (pvt->fam == 0x15 && pvt->model == 0x30) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	} else if (pvt->fam >= 0x17) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 		int umc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 		for_each_umc(umc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 			pvt->csels[umc].b_cnt = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 			pvt->csels[umc].m_cnt = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) static void read_umc_base_mask(struct amd64_pvt *pvt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	u32 umc_base_reg, umc_base_reg_sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	u32 umc_mask_reg, umc_mask_reg_sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	u32 base_reg, base_reg_sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	u32 mask_reg, mask_reg_sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	u32 *base, *base_sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	u32 *mask, *mask_sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	int cs, umc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	for_each_umc(umc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 		umc_base_reg = get_umc_base(umc) + UMCCH_BASE_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 		umc_base_reg_sec = get_umc_base(umc) + UMCCH_BASE_ADDR_SEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 		for_each_chip_select(cs, umc, pvt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 			base = &pvt->csels[umc].csbases[cs];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 			base_sec = &pvt->csels[umc].csbases_sec[cs];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 			base_reg = umc_base_reg + (cs * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 			base_reg_sec = umc_base_reg_sec + (cs * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 			if (!amd_smn_read(pvt->mc_node_id, base_reg, base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 				edac_dbg(0, "  DCSB%d[%d]=0x%08x reg: 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 					 umc, cs, *base, base_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 			if (!amd_smn_read(pvt->mc_node_id, base_reg_sec, base_sec))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 				edac_dbg(0, "    DCSB_SEC%d[%d]=0x%08x reg: 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 					 umc, cs, *base_sec, base_reg_sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 		umc_mask_reg = get_umc_base(umc) + UMCCH_ADDR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 		umc_mask_reg_sec = get_umc_base(umc) + UMCCH_ADDR_MASK_SEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 		for_each_chip_select_mask(cs, umc, pvt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 			mask = &pvt->csels[umc].csmasks[cs];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 			mask_sec = &pvt->csels[umc].csmasks_sec[cs];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 			mask_reg = umc_mask_reg + (cs * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 			mask_reg_sec = umc_mask_reg_sec + (cs * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 			if (!amd_smn_read(pvt->mc_node_id, mask_reg, mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 				edac_dbg(0, "  DCSM%d[%d]=0x%08x reg: 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 					 umc, cs, *mask, mask_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 			if (!amd_smn_read(pvt->mc_node_id, mask_reg_sec, mask_sec))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 				edac_dbg(0, "    DCSM_SEC%d[%d]=0x%08x reg: 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 					 umc, cs, *mask_sec, mask_reg_sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) }
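
/*
 * The base/mask registers for consecutive chip selects are consecutive
 * 32-bit words, hence the 'cs * 4' byte offset above: e.g. chip select 2
 * of a UMC is read from get_umc_base(umc) + UMCCH_BASE_ADDR + 8.
 */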
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)  * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) static void read_dct_base_mask(struct amd64_pvt *pvt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	int cs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	prep_chip_selects(pvt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	if (pvt->umc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 		return read_umc_base_mask(pvt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	for_each_chip_select(cs, 0, pvt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 		int reg0   = DCSB0 + (cs * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 		int reg1   = DCSB1 + (cs * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 		u32 *base0 = &pvt->csels[0].csbases[cs];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 		u32 *base1 = &pvt->csels[1].csbases[cs];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 		if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 			edac_dbg(0, "  DCSB0[%d]=0x%08x reg: F2x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 				 cs, *base0, reg0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 		if (pvt->fam == 0xf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 		if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 			edac_dbg(0, "  DCSB1[%d]=0x%08x reg: F2x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 				 cs, *base1, (pvt->fam == 0x10) ? reg1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 							: reg0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	for_each_chip_select_mask(cs, 0, pvt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 		int reg0   = DCSM0 + (cs * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 		int reg1   = DCSM1 + (cs * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 		u32 *mask0 = &pvt->csels[0].csmasks[cs];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 		u32 *mask1 = &pvt->csels[1].csmasks[cs];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 		if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 			edac_dbg(0, "    DCSM0[%d]=0x%08x reg: F2x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 				 cs, *mask0, reg0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 		if (pvt->fam == 0xf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 		if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 			edac_dbg(0, "    DCSM1[%d]=0x%08x reg: F2x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 				 cs, *mask1, (pvt->fam == 0x10) ? reg1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 							: reg0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) static void determine_memory_type(struct amd64_pvt *pvt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	u32 dram_ctrl, dcsm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	if (pvt->umc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 		if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 			pvt->dram_type = MEM_LRDDR4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 		else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 			pvt->dram_type = MEM_RDDR4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 			pvt->dram_type = MEM_DDR4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	switch (pvt->fam) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	case 0xf:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 		if (pvt->ext_model >= K8_REV_F)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 			goto ddr3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 		pvt->dram_type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	case 0x10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 		if (pvt->dchr0 & DDR3_MODE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 			goto ddr3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 		pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	case 0x15:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 		if (pvt->model < 0x60)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 			goto ddr3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 		 * Model 60h needs special handling:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 		 * We use a Chip Select value of '0' to obtain dcsm.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 		 * Theoretically, it is possible to populate LRDIMMs of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 		 * different 'Rank' value on a DCT. But this is not the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 		 * common case, so it's reasonable to assume all DIMMs are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 		 * of the same 'type' until proven otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 		amd64_read_dct_pci_cfg(pvt, 0, DRAM_CONTROL, &dram_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 		dcsm = pvt->csels[0].csmasks[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 		if (((dram_ctrl >> 8) & 0x7) == 0x2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 			pvt->dram_type = MEM_DDR4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 		else if (pvt->dclr0 & BIT(16))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 			pvt->dram_type = MEM_DDR3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 		else if (dcsm & 0x3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 			pvt->dram_type = MEM_LRDDR3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 			pvt->dram_type = MEM_RDDR3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	case 0x16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 		goto ddr3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 		WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 		pvt->dram_type = MEM_EMPTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) ddr3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) }
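
/*
 * Example decode for the F15h M60h path above: DRAM_CONTROL[10:8] == 0x2
 * selects DDR4. Otherwise, DCLR0 bit 16 set means unbuffered DDR3; with it
 * clear, a nonzero rank-multiply field (dcsm[1:0]) means LRDDR3, and
 * registered DDR3 is assumed as the fallback.
 */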
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) /* Get the number of DCT channels the memory controller is using. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) static int k8_early_channel_count(struct amd64_pvt *pvt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	int flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	if (pvt->ext_model >= K8_REV_F)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 		/* RevF (NPT) and later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 		flag = pvt->dclr0 & WIDTH_128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 		/* RevE and earlier */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 		flag = pvt->dclr0 & REVE_WIDTH_128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	/* not used */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	pvt->dclr1 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	return (flag) ? 2 : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) /* On F10h and later ErrAddr is MC4_ADDR[47:1] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	u16 mce_nid = amd_get_nb_id(m->extcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	struct mem_ctl_info *mci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	u8 start_bit = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	u8 end_bit   = 47;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	u64 addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	mci = edac_mc_find(mce_nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	if (!mci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	pvt = mci->pvt_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	if (pvt->fam == 0xf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 		start_bit = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 		end_bit   = 39;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	addr = m->addr & GENMASK_ULL(end_bit, start_bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	 * Erratum 637 workaround
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	if (pvt->fam == 0x15) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 		u64 cc6_base, tmp_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 		u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 		u8 intlv_en;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 		if ((addr & GENMASK_ULL(47, 24)) >> 24 != 0x00fdf7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 			return addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 		intlv_en = tmp >> 21 & 0x7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 		/* add [47:27] + 3 trailing bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 		cc6_base  = (tmp & GENMASK_ULL(20, 0)) << 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 		/* reverse and add DramIntlvEn */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 		cc6_base |= intlv_en ^ 0x7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 		/* pin at [47:24] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 		cc6_base <<= 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 		if (!intlv_en)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 			return cc6_base | (addr & GENMASK_ULL(23, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 							/* faster log2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 		tmp_addr  = (addr & GENMASK_ULL(23, 12)) << __fls(intlv_en + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 		/* OR DramIntlvSel into bits [14:12] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 		tmp_addr |= (tmp & GENMASK_ULL(23, 21)) >> 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 		/* add remaining [11:0] bits from original MC4_ADDR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 		tmp_addr |= addr & GENMASK_ULL(11, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 		return cc6_base | tmp_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	return addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) }
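
/*
 * Masking example (illustrative address): with m->addr == 0x123456789b and
 * the default [47:1] window, only bit 0 is cleared and the function returns
 * 0x123456789a; on K8 (family 0xf) the [39:3] window also drops the low
 * three bits, giving 0x1234567898.
 */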
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) static struct pci_dev *pci_get_related_function(unsigned int vendor,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 						unsigned int device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 						struct pci_dev *related)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	struct pci_dev *dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	while ((dev = pci_get_device(vendor, device, dev))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 		if (pci_domain_nr(dev->bus) == pci_domain_nr(related->bus) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 		    (dev->bus->number == related->bus->number) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 		    (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	return dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 	struct amd_northbridge *nb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	struct pci_dev *f1 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	unsigned int pci_func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	int off = range << 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	u32 llim;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off,  &pvt->ranges[range].base.lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	if (pvt->fam == 0xf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	if (!dram_rw(pvt, range))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off,  &pvt->ranges[range].base.hi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	/* F15h: factor in CC6 save area by reading dst node's limit reg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	if (pvt->fam != 0x15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	nb = node_to_amd_nb(dram_dst_node(pvt, range));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	if (WARN_ON(!nb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	if (pvt->model == 0x60)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 		pci_func = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	else if (pvt->model == 0x30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 		pci_func = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 		pci_func = PCI_DEVICE_ID_AMD_15H_NB_F1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	f1 = pci_get_related_function(nb->misc->vendor, pci_func, nb->misc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	if (WARN_ON(!f1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	pvt->ranges[range].lim.lo &= GENMASK_ULL(15, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 				    /* {[39:27],111b} */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	pvt->ranges[range].lim.hi &= GENMASK_ULL(7, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 				    /* [47:40] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	pvt->ranges[range].lim.hi |= llim >> 13;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	pci_dev_put(f1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) }
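
/*
 * On the F15h path above, the local node limit register holds
 * DramLimit[39:27] in its low 13 bits. Appending 111b yields address bits
 * [39:24] with [26:24] forced high, and the '<< 16' places that field in
 * lim.lo[31:16] to match the DRAM Limit register layout.
 */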
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 				    struct err_info *err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	struct amd64_pvt *pvt = mci->pvt_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	error_address_to_page_and_offset(sys_addr, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	 * Find out which node the error address belongs to. This may be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	 * different from the node that detected the error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	err->src_mci = find_mc_by_sys_addr(mci, sys_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	if (!err->src_mci) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 		amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 			     (unsigned long)sys_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 		err->err_code = ERR_NODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	/* Now map the sys_addr to a CSROW */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	err->csrow = sys_addr_to_csrow(err->src_mci, sys_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	if (err->csrow < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 		err->err_code = ERR_CSROW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	/* CHIPKILL enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	if (pvt->nbcfg & NBCFG_CHIPKILL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 		if (err->channel < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 			 * Syndrome didn't map, so we don't know which of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 			 * 2 DIMMs is in error. So we need to ID 'both' of them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 			 * as suspect.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 			amd64_mc_warn(err->src_mci, "unknown syndrome 0x%04x - "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 				      "possible error reporting race\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 				      err->syndrome);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 			err->err_code = ERR_CHANNEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 		 * non-chipkill ecc mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 		 * The k8 documentation is unclear about how to determine the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 		 * channel number when using non-chipkill memory.  This method
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 		 * was obtained from email communication with someone at AMD.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 		 * (Wish the email was placed in this comment - norsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 		err->channel = ((sys_addr & BIT(3)) != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) }
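
/*
 * In the non-chipkill fallback above, bit 3 of the system address selects
 * the channel: e.g. sys_addr 0x1000 maps to channel 0 and 0x1008 to
 * channel 1 (example addresses).
 */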
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) static int ddr2_cs_size(unsigned i, bool dct_width)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 	unsigned shift = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	if (i <= 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 		shift = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	else if (!(i & 0x1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 		shift = i >> 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 		shift = (i + 1) >> 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	return 128 << (shift + !!dct_width);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 				  unsigned cs_mode, int cs_mask_nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	if (pvt->ext_model >= K8_REV_F) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 		WARN_ON(cs_mode > 11);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	else if (pvt->ext_model >= K8_REV_D) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 		unsigned diff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 		WARN_ON(cs_mode > 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 		 * the below calculation, besides trying to win an obfuscated C
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 		 * contest, maps cs_mode values to DIMM chip select sizes. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 		 * mappings are:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 		 * cs_mode	CS size (MB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 		 * =======	============
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 		 * 0		32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 		 * 1		64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 		 * 2		128
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 		 * 3		128
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 		 * 4		256
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 		 * 5		512
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 		 * 6		256
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 		 * 7		512
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 		 * 8		1024
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 		 * 9		1024
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 		 * 10		2048
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 		 * Basically, it calculates a value with which to shift the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 		 * smallest CS size of 32MB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 		 * ddr[23]_cs_size have a similar purpose.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 		diff = cs_mode/3 + (unsigned)(cs_mode > 5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 		return 32 << (cs_mode - diff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 		WARN_ON(cs_mode > 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 		return 32 << cs_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) }
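
/*
 * Worked examples for the RevD/RevE mapping above: cs_mode == 7 gives
 * diff = 7/3 + 1 = 3, so 32 << (7 - 3) = 512MB; cs_mode == 10 gives
 * diff = 10/3 + 1 = 4 and 32 << 6 = 2048MB, matching the table.
 */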
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)  * Get the number of DCT channels in use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406)  * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)  *	number of Memory Channels in operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)  * Pass back:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)  *	contents of the DCL0_LOW register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) static int f1x_early_channel_count(struct amd64_pvt *pvt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 	int i, j, channels = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 	/* On F10h, if we are in 128 bit mode, then we are using 2 channels */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	if (pvt->fam == 0x10 && (pvt->dclr0 & WIDTH_128))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 		return 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 	 * Need to check if we are in unganged mode: in that case there are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 	 * two channels, but they are not in 128-bit mode, so the 'dclr0'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 	 * status bit above will be OFF.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 	 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	 * its CSEnable bit set. If so, this is the single-DIMM case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	edac_dbg(0, "Data width is not 128 bits - need more decoding\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	 * Check DRAM Bank Address Mapping values for each DIMM to see if there
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 	 * is more than just one DIMM present in unganged mode. Need to check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 	 * both controllers since DIMMs can be placed in either one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	for (i = 0; i < 2; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 		u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 		for (j = 0; j < 4; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 			if (DBAM_DIMM(j, dbam) > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 				channels++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	if (channels > 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 		channels = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 	amd64_info("MCT channel count: %d\n", channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 	return channels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) }
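
/*
 * Example: a single DIMM on DCT0 leaves DBAM1 without any populated DIMM
 * fields, so only the first loop iteration bumps 'channels' and the
 * function reports 1; DIMMs on both DCTs report 2, with the clamp above
 * guarding against counting more than two channels.
 */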
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) static int f17_early_channel_count(struct amd64_pvt *pvt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 	int i, channels = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	/* SDP Control bit 31 (SdpInit) is clear for unused UMC channels */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 	for_each_umc(i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 		channels += !!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	amd64_info("MCT channel count: %d\n", channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	return channels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) static int ddr3_cs_size(unsigned i, bool dct_width)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 	unsigned shift = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	int cs_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 	if (i == 0 || i == 3 || i == 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 		cs_size = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 	else if (i <= 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 		shift = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	else if (i == 12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 		shift = 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	else if (!(i & 0x1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 		shift = i >> 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 		shift = (i + 1) >> 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 	if (cs_size != -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 		cs_size = (128 * (1 << !!dct_width)) << shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 	return cs_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) }
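
/*
 * Example: i == 5 is odd, so shift = (5 + 1) >> 1 = 3; a 128-bit DCT
 * (dct_width true) then yields (128 * 2) << 3 = 2048MB, and a 64-bit one
 * 1024MB. Encodings 0, 3 and 4 are reserved and return -1.
 */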
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) static int ddr3_lrdimm_cs_size(unsigned i, unsigned rank_multiply)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 	unsigned shift = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 	int cs_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	if (i < 4 || i == 6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 		cs_size = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	else if (i == 12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 		shift = 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 	else if (!(i & 0x1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 		shift = i >> 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 		shift = (i + 1) >> 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 	if (cs_size != -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 		cs_size = rank_multiply * (128 << shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 	return cs_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) static int ddr4_cs_size(unsigned i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	int cs_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	if (i == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 		cs_size = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	else if (i == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 		cs_size = 1024;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 		/* Min cs_size = 1G */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 		cs_size = 1024 * (1 << (i >> 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	return cs_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) }
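
/*
 * Example: i == 2 and i == 3 both give 1024 * (1 << 1) = 2048MB; each
 * further even/odd pair doubles the size, so i == 4 and 5 map to 4096MB.
 */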
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 				   unsigned cs_mode, int cs_mask_nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 	WARN_ON(cs_mode > 11);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 	if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 		return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537)  * F15h supports only 64bit DCT interfaces
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 				   unsigned cs_mode, int cs_mask_nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 	WARN_ON(cs_mode > 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 	return ddr3_cs_size(cs_mode, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) /* F15h M60h supports DDR4 mapping as well. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) static int f15_m60h_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 					unsigned cs_mode, int cs_mask_nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 	int cs_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 	u32 dcsm = pvt->csels[dct].csmasks[cs_mask_nr];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 	WARN_ON(cs_mode > 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 	if (pvt->dram_type == MEM_DDR4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 		if (cs_mode > 9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 			return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 		cs_size = ddr4_cs_size(cs_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 	} else if (pvt->dram_type == MEM_LRDDR3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 		unsigned rank_multiply = dcsm & 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 		if (rank_multiply == 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 			rank_multiply = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 		cs_size = ddr3_lrdimm_cs_size(cs_mode, rank_multiply);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 		/* Minimum cs_size is 512 MB for F15h M60h. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 		if (cs_mode == 0x1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 			return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 		cs_size = ddr3_cs_size(cs_mode, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 	return cs_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)  * F16h and F15h model 30h have only limited cs_modes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 				unsigned cs_mode, int cs_mask_nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 	WARN_ON(cs_mode > 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 	if (cs_mode == 6 || cs_mode == 8 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 	    cs_mode == 9 || cs_mode == 12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 		return ddr3_cs_size(cs_mode, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) static int f17_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 				    unsigned int cs_mode, int csrow_nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 	u32 addr_mask_orig, addr_mask_deinterleaved;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 	u32 msb, weight, num_zero_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 	int dimm, size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 	/* No Chip Selects are enabled. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 	if (!cs_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 		return size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 	/* Requested size of an even CS but none are enabled. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 	if (!(cs_mode & CS_EVEN) && !(csrow_nr & 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 		return size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 	/* Requested size of an odd CS but none are enabled. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 	if (!(cs_mode & CS_ODD) && (csrow_nr & 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 		return size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 	 * There is one mask per DIMM, and two Chip Selects per DIMM.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 	 *	CS0 and CS1 -> DIMM0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 	 *	CS2 and CS3 -> DIMM1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 	dimm = csrow_nr >> 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	/* Asymmetric dual-rank DIMM support. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 	if ((csrow_nr & 1) && (cs_mode & CS_ODD_SECONDARY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 		addr_mask_orig = pvt->csels[umc].csmasks_sec[dimm];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 		addr_mask_orig = pvt->csels[umc].csmasks[dimm];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 	 * The number of zero bits in the mask is equal to the number of bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 	 * in a full mask minus the number of bits in the current mask.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 	 * The MSB is the number of bits in the full mask because BIT[0] is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 	 * always 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 	 * In the special 3 Rank interleaving case, a single bit is flipped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 	 * without swapping with the most significant bit. This can be handled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 	 * by keeping the MSB where it is and ignoring the single zero bit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 	msb = fls(addr_mask_orig) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 	weight = hweight_long(addr_mask_orig);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 	num_zero_bits = msb - weight - !!(cs_mode & CS_3R_INTERLEAVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 	/* Take the number of zero bits off from the top of the mask. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 	addr_mask_deinterleaved = GENMASK_ULL(msb - num_zero_bits, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 	edac_dbg(1, "CS%d DIMM%d AddrMasks:\n", csrow_nr, dimm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 	edac_dbg(1, "  Original AddrMask: 0x%x\n", addr_mask_orig);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 	edac_dbg(1, "  Deinterleaved AddrMask: 0x%x\n", addr_mask_deinterleaved);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 	/* Register [31:1] = Address [39:9]. Size is in KB here. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 	size = (addr_mask_deinterleaved >> 2) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 	/* Return size in MBs. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 	return size >> 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) }
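/*
 * Worked example (illustration only; the mask value is hypothetical): for a
 * 2-way interleaved 8 GB region, addr_mask_orig might read 0x1fffffc, i.e.
 * bits [24:2] set with bit 1 cleared by the interleave. Then msb = 24,
 * weight = 23, num_zero_bits = 1 (no 3R interleave), and the deinterleaved
 * mask is GENMASK(23, 1) = 0xfffffe, giving (0xfffffe >> 2) + 1 =
 * 0x400000 KB, reported as 4096 MB per chip select.
 */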
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) static void read_dram_ctl_register(struct amd64_pvt *pvt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 	if (pvt->fam == 0xf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 	if (!amd64_read_pci_cfg(pvt->F2, DCT_SEL_LO, &pvt->dct_sel_lo)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 		edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 			 pvt->dct_sel_lo, dct_sel_baseaddr(pvt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 		edac_dbg(0, "  DCTs operate in %s mode\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 			 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 		if (!dct_ganging_enabled(pvt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 			edac_dbg(0, "  Address range split per DCT: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 				 (dct_high_range_enabled(pvt) ? "yes" : "no"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 		edac_dbg(0, "  data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 			 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 			 (dct_memory_cleared(pvt) ? "yes" : "no"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 		edac_dbg(0, "  channel interleave: %s, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 			 "interleave bits selector: 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 			 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 			 dct_sel_interleave_addr(pvt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 	amd64_read_pci_cfg(pvt->F2, DCT_SEL_HI, &pvt->dct_sel_hi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685)  * Determine channel (DCT) based on the interleaving mode (see F15h M30h BKDG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686)  * 2.10.12 Memory Interleaving Modes).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) static u8 f15_m30h_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 				     u8 intlv_en, int num_dcts_intlv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 				     u32 dct_sel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 	u8 channel = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 	u8 select;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 	if (!intlv_en)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 		return (u8)dct_sel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 	if (num_dcts_intlv == 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 		select = (sys_addr >> 8) & 0x3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 		channel = select ? 0x3 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 	} else if (num_dcts_intlv == 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 		u8 intlv_addr = dct_sel_interleave_addr(pvt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 		switch (intlv_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 		case 0x4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 			channel = (sys_addr >> 8) & 0x3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 		case 0x5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 			channel = (sys_addr >> 9) & 0x3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 	return channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) }
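/*
 * Illustration (hypothetical addresses): with two DCTs interleaved, any
 * non-zero value in sys_addr[9:8] selects channel 3 (DCT3) and zero selects
 * channel 0 (DCT0). With four DCTs and intlv_addr == 0x4, the channel is
 * simply sys_addr[9:8].
 */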
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716)  * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717)  * Interleaving Modes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 				bool hi_range_sel, u8 intlv_en)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 	u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 	if (dct_ganging_enabled(pvt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 	if (hi_range_sel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 		return dct_sel_high;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 	 * see F2x110[DctSelIntLvAddr] - channel interleave mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 	if (dct_interleave_enabled(pvt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 		u8 intlv_addr = dct_sel_interleave_addr(pvt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 		/* return DCT select function: 0=DCT0, 1=DCT1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 		if (!intlv_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 			return sys_addr >> 6 & 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 		if (intlv_addr & 0x2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 			u8 shift = intlv_addr & 0x1 ? 9 : 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 			u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) & 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 			return ((sys_addr >> shift) & 1) ^ temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 		if (intlv_addr & 0x4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 			u8 shift = intlv_addr & 0x1 ? 9 : 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 			return (sys_addr >> shift) & 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 		return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 	if (dct_high_range_enabled(pvt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 		return ~dct_sel_high & 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) }
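/*
 * Illustration (hypothetical address): for intlv_addr = 0x2 the shift is 6,
 * and the DCT is sys_addr[6] XORed with the parity of sys_addr[20:16]. E.g.
 * sys_addr = 0x10040 has bit 6 set and odd parity in bits [20:16], so the
 * selected channel is 1 ^ 1 = 0.
 */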
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) /* Convert the sys_addr to the normalized DCT address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 				 u64 sys_addr, bool hi_rng,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 				 u32 dct_sel_base_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 	u64 chan_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 	u64 dram_base		= get_dram_base(pvt, range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 	u64 hole_off		= f10_dhar_offset(pvt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 	u64 dct_sel_base_off	= (u64)(pvt->dct_sel_hi & 0xFFFFFC00) << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 	if (hi_rng) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 		 * if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 		 * base address of high range is below 4 GB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 		 * (bits [47:27] at [31:11])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 		 * DRAM address space on this DCT is hoisted above 4 GB	&&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 		 * sys_addr > 4 GB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 		 *	remove hole offset from sys_addr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 		 * else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 		 *	remove high range offset from sys_addr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 		if ((!(dct_sel_base_addr >> 16) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 		     dct_sel_base_addr < dhar_base(pvt)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 		    dhar_valid(pvt) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 		    (sys_addr >= BIT_64(32)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 			chan_off = hole_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 			chan_off = dct_sel_base_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 		 * if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 		 * we have a valid hole		&&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 		 * sys_addr > 4 GB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 		 *	remove hole
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 		 * else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 		 *	remove dram base to normalize to DCT address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 		if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 			chan_off = hole_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 			chan_off = dram_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 	return (sys_addr & GENMASK_ULL(47, 6)) - (chan_off & GENMASK_ULL(47, 23));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) }
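/*
 * Illustration (hypothetical values): with no hoisting and a range whose
 * DRAM base is 0x100000000, a sys_addr of 0x100040000 normalizes to
 * 0x40000. Only sys_addr bits [47:6] and chan_off bits [47:23] take part,
 * so the channel offset granularity is 8 MB.
 */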
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811)  * Check whether the csrow passed in is marked as SPARED; if so, return the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812)  * new spare row.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 	int tmp_cs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 	if (online_spare_swap_done(pvt, dct) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 	    csrow == online_spare_bad_dramcs(pvt, dct)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 		for_each_chip_select(tmp_cs, dct, pvt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 			if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 				csrow = tmp_cs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 	return csrow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) }
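/*
 * Note: the 0x2 test above matches what the BKDG describes as the
 * spare-rank flag (bit 1) in the chip-select base register, so the first
 * rank carrying it becomes the replacement csrow for the bad one.
 */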
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832)  * Iterate over the DRAM DCT "base" and "mask" registers looking for a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833)  * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835)  * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836)  *	-EINVAL:  NOT FOUND
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837)  *	0..csrow = Chip-Select Row
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 	struct mem_ctl_info *mci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 	struct amd64_pvt *pvt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 	u64 cs_base, cs_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 	int cs_found = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 	int csrow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 	mci = edac_mc_find(nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 	if (!mci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 		return cs_found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 	pvt = mci->pvt_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 	edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr, dct);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 	for_each_chip_select(csrow, dct, pvt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 		if (!csrow_enabled(csrow, dct, pvt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 		get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 		edac_dbg(1, "    CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 			 csrow, cs_base, cs_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 		cs_mask = ~cs_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 		edac_dbg(1, "    (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 			 (in_addr & cs_mask), (cs_base & cs_mask));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 		if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 			if (pvt->fam == 0x15 && pvt->model >= 0x30) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 				cs_found = csrow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 			cs_found = f10_process_possible_spare(pvt, dct, csrow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 			edac_dbg(1, " MATCH csrow=%d\n", cs_found);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 	return cs_found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) }
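/*
 * Illustration (hypothetical values): with cs_base = 0 and
 * cs_mask = 0x0fffffff (28 don't-care bits, a 256 MB window), in_addr
 * 0x04567890 matches because (0x04567890 & ~0x0fffffff) == 0 ==
 * (cs_base & ~0x0fffffff); only the bits above the don't-care window must
 * agree.
 */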
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884)  * See F2x10C. Non-interleaved graphics framebuffer memory below 16 GB is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885)  * swapped with a region located at the bottom of memory so that the GPU can use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886)  * the interleaved region and thus two channels.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 	u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 	if (pvt->fam == 0x10) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 		/* only revC3 and revE have that feature */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 		if (pvt->model < 4 || (pvt->model < 0xa && pvt->stepping < 3))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 			return sys_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 	amd64_read_pci_cfg(pvt->F2, SWAP_INTLV_REG, &swap_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 	if (!(swap_reg & 0x1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 		return sys_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 	swap_base	= (swap_reg >> 3) & 0x7f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 	swap_limit	= (swap_reg >> 11) & 0x7f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 	rgn_size	= (swap_reg >> 20) & 0x7f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 	tmp_addr	= sys_addr >> 27;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 	if (!(sys_addr >> 34) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 	    (((tmp_addr >= swap_base) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 	     (tmp_addr <= swap_limit)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 	     (tmp_addr < rgn_size)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 		return sys_addr ^ (u64)swap_base << 27;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 	return sys_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) }
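/*
 * Note on the XOR above (hypothetical values): base, limit and size are in
 * 128 MB units since the compares use sys_addr >> 27. For swap_base = 0x40
 * (the 8 GB mark), an address in the bottom rgn_size blocks gets bit 33 set
 * and lands at 8 GB, while one inside the swapped window gets it cleared;
 * the two regions effectively trade places.
 */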
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) /* For a given @dram_range, check if @sys_addr falls within it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 				  u64 sys_addr, int *chan_sel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 	int cs_found = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 	u64 chan_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 	u32 dct_sel_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 	u8 channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 	bool high_range = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 	u8 node_id    = dram_dst_node(pvt, range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 	u8 intlv_en   = dram_intlv_en(pvt, range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 	u32 intlv_sel = dram_intlv_sel(pvt, range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 	edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 		 range, sys_addr, get_dram_limit(pvt, range));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 	if (dhar_valid(pvt) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 	    dhar_base(pvt) <= sys_addr &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 	    sys_addr < BIT_64(32)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 		amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 			    sys_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 	if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 	sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 	dct_sel_base = dct_sel_baseaddr(pvt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 	 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 	 * select between DCT0 and DCT1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 	if (dct_high_range_enabled(pvt) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 	   !dct_ganging_enabled(pvt) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 	   ((sys_addr >> 27) >= (dct_sel_base >> 11)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 		high_range = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 	channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 	chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 					  high_range, dct_sel_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 	/* Remove node interleaving, see F1x120 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 	if (intlv_en)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 		chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 			    (chan_addr & 0xfff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 	/* remove channel interleave */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 	if (dct_interleave_enabled(pvt) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 	   !dct_high_range_enabled(pvt) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 	   !dct_ganging_enabled(pvt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 		if (dct_sel_interleave_addr(pvt) != 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 			if (dct_sel_interleave_addr(pvt) == 0x3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 				/* hash 9 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 				chan_addr = ((chan_addr >> 10) << 9) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 					     (chan_addr & 0x1ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 				/* A[6] or hash 6 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 				chan_addr = ((chan_addr >> 7) << 6) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 					     (chan_addr & 0x3f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 			/* A[12] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 			chan_addr = ((chan_addr >> 13) << 12) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 				     (chan_addr & 0xfff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 	edac_dbg(1, "   Normalized DCT addr: 0x%llx\n", chan_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 	cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 	if (cs_found >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 		*chan_sel = channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 	return cs_found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) }
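/*
 * Illustration of the node de-interleave above (hypothetical values): with
 * intlv_en = 0x7 (8-node interleave), hweight8(0x7) = 3 bits are squeezed
 * out at bit 12, so chan_addr 0xf345678 becomes
 * ((0xf345678 >> 15) << 12) | 0x678 = 0x1e68678.
 */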
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 					u64 sys_addr, int *chan_sel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 	int cs_found = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 	int num_dcts_intlv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 	u64 chan_addr, chan_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 	u64 dct_base, dct_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 	u32 dct_cont_base_reg, dct_cont_limit_reg, tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 	u8 channel, alias_channel, leg_mmio_hole, dct_sel, dct_offset_en;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 	u64 dhar_offset		= f10_dhar_offset(pvt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 	u8 intlv_addr		= dct_sel_interleave_addr(pvt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 	u8 node_id		= dram_dst_node(pvt, range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 	u8 intlv_en		= dram_intlv_en(pvt, range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 	amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &dct_cont_base_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 	amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &dct_cont_limit_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 	dct_offset_en		= (u8) ((dct_cont_base_reg >> 3) & BIT(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 	dct_sel			= (u8) ((dct_cont_base_reg >> 4) & 0x7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 	edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 		 range, sys_addr, get_dram_limit(pvt, range));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 	if (!(get_dram_base(pvt, range)  <= sys_addr) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 	    !(get_dram_limit(pvt, range) >= sys_addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 	if (dhar_valid(pvt) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 	    dhar_base(pvt) <= sys_addr &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 	    sys_addr < BIT_64(32)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 		amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 			    sys_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 	/* Verify sys_addr is within DCT Range. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 	dct_base = (u64) dct_sel_baseaddr(pvt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 	dct_limit = (dct_cont_limit_reg >> 11) & 0x1FFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 	if (!(dct_cont_base_reg & BIT(0)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 	    !(dct_base <= (sys_addr >> 27) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 	      dct_limit >= (sys_addr >> 27)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 	/* Verify the number of DCTs that participate in channel interleaving. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 	num_dcts_intlv = (int) hweight8(intlv_en);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 	if (!(num_dcts_intlv % 2 == 0) || (num_dcts_intlv > 4))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 	if (pvt->model >= 0x60)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 		channel = f1x_determine_channel(pvt, sys_addr, false, intlv_en);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 		channel = f15_m30h_determine_channel(pvt, sys_addr, intlv_en,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 						     num_dcts_intlv, dct_sel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 	/* Verify we stay within the MAX number of channels allowed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 	if (channel > 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 	leg_mmio_hole = (u8) (dct_cont_base_reg >> 1 & BIT(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 	/* Get normalized DCT addr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 	if (leg_mmio_hole && (sys_addr >= BIT_64(32)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 		chan_offset = dhar_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 		chan_offset = dct_base << 27;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 	chan_addr = sys_addr - chan_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 	/* remove channel interleave */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 	if (num_dcts_intlv == 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 		if (intlv_addr == 0x4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 			chan_addr = ((chan_addr >> 9) << 8) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 						(chan_addr & 0xff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 		else if (intlv_addr == 0x5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 			chan_addr = ((chan_addr >> 10) << 9) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 						(chan_addr & 0x1ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 	} else if (num_dcts_intlv == 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 		if (intlv_addr == 0x4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 			chan_addr = ((chan_addr >> 10) << 8) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 							(chan_addr & 0xff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 		else if (intlv_addr == 0x5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 			chan_addr = ((chan_addr >> 11) << 9) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 							(chan_addr & 0x1ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 	if (dct_offset_en) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 		amd64_read_pci_cfg(pvt->F1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 				   DRAM_CONT_HIGH_OFF + (int) channel * 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 				   &tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 		chan_addr += (u64) ((tmp >> 11) & 0xfff) << 27;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 	f15h_select_dct(pvt, channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 	edac_dbg(1, "   Normalized DCT addr: 0x%llx\n", chan_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 	 * Find the chip select:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 	 * if channel == 3, alias it to 1. F15h M30h has support for four
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 	 * DCTs, but only two (DCT0 and DCT3) are currently functional, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 	 * all DCT3 registers were read into pvt->csels[1], so '1' must be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 	 * used here to get the correct info. See F15h M30h BKDG Sections
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 	 * 2.10 and 2.10.3 for clarification.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 	alias_channel = (channel == 3) ? 1 : channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 	cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, alias_channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 	if (cs_found >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 		*chan_sel = alias_channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 	return cs_found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) }
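/*
 * Illustration of the channel de-interleave above (hypothetical values):
 * for two interleaved DCTs and intlv_addr == 0x4 the select bit sits at
 * bit 8, so chan_addr 0x1234 becomes ((0x1234 >> 9) << 8) | 0x34 = 0x934.
 */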
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 					u64 sys_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 					int *chan_sel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 	int cs_found = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 	unsigned range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 	for (range = 0; range < DRAM_RANGES; range++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 		if (!dram_rw(pvt, range))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 		if (pvt->fam == 0x15 && pvt->model >= 0x30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 			cs_found = f15_m30h_match_to_this_node(pvt, range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 							       sys_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 							       chan_sel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 		else if ((get_dram_base(pvt, range)  <= sys_addr) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 			 (get_dram_limit(pvt, range) >= sys_addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 			cs_found = f1x_match_to_this_node(pvt, range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 							  sys_addr, chan_sel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 			if (cs_found >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 	return cs_found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148)  * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149)  * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151)  * The @sys_addr is usually an error address received from the hardware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152)  * (MCX_ADDR).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 				     struct err_info *err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 	struct amd64_pvt *pvt = mci->pvt_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 	error_address_to_page_and_offset(sys_addr, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 	err->csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &err->channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 	if (err->csrow < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 		err->err_code = ERR_CSROW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 	 * We need the syndromes for channel detection only when we're
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 	 * ganged. Otherwise @chan should already contain the channel at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 	 * this point.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 	if (dct_ganging_enabled(pvt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177)  * Debug routine to display the memory sizes of all logical DIMMs and their
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178)  * CSROWs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) static void debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 	int dimm, size0, size1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 	u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 	u32 dbam  = ctrl ? pvt->dbam1 : pvt->dbam0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 	if (pvt->fam == 0xf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 		/* K8 families < revF not supported yet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 		if (pvt->ext_model < K8_REV_F)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 		WARN_ON(ctrl != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 	if (pvt->fam == 0x10) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 		dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 							   : pvt->dbam0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 		dcsb = (ctrl && !dct_ganging_enabled(pvt)) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 				 pvt->csels[1].csbases :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 				 pvt->csels[0].csbases;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 	} else if (ctrl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 		dbam = pvt->dbam0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 		dcsb = pvt->csels[1].csbases;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 	edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 		 ctrl, dbam);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 	edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 	/* Dump memory sizes for DIMM and its CSROWs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 	for (dimm = 0; dimm < 4; dimm++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 		size0 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 		if (dcsb[dimm*2] & DCSB_CS_ENABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 			 * For F15h M60h, we need a multiplier for the LRDIMM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 			 * cs_size calculation. We pass the dimm value to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 			 * dbam_to_cs mapper so it can find the multiplier from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 			 * the corresponding DCSM.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 			size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 						     DBAM_DIMM(dimm, dbam),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 						     dimm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 		size1 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 		if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 			size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 						     DBAM_DIMM(dimm, dbam),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 						     dimm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 				dimm * 2,     size0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 				dimm * 2 + 1, size1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) }
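/*
 * Note: DBAM holds one 4-bit cs_mode per DIMM, so DBAM_DIMM(dimm, dbam)
 * extracts (dbam >> (dimm * 4)) & 0xF; a (hypothetical) dbam of 0x00004210
 * yields modes 0, 1, 2 and 4 for DIMMs 0-3.
 */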
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) static struct amd64_family_type family_types[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 	[K8_CPUS] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 		.ctl_name = "K8",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 		.f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 		.f2_id = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 		.max_mcs = 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 		.ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 			.early_channel_count	= k8_early_channel_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 			.map_sysaddr_to_csrow	= k8_map_sysaddr_to_csrow,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 			.dbam_to_cs		= k8_dbam_to_chip_select,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 	[F10_CPUS] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 		.ctl_name = "F10h",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 		.f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 		.f2_id = PCI_DEVICE_ID_AMD_10H_NB_DRAM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 		.max_mcs = 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 		.ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 			.early_channel_count	= f1x_early_channel_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 			.dbam_to_cs		= f10_dbam_to_chip_select,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 	[F15_CPUS] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 		.ctl_name = "F15h",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 		.f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 		.f2_id = PCI_DEVICE_ID_AMD_15H_NB_F2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 		.max_mcs = 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 		.ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 			.early_channel_count	= f1x_early_channel_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 			.dbam_to_cs		= f15_dbam_to_chip_select,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 	[F15_M30H_CPUS] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 		.ctl_name = "F15h_M30h",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 		.f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 		.f2_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 		.max_mcs = 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 		.ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 			.early_channel_count	= f1x_early_channel_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 			.dbam_to_cs		= f16_dbam_to_chip_select,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 	[F15_M60H_CPUS] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 		.ctl_name = "F15h_M60h",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 		.f1_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 		.f2_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 		.max_mcs = 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 		.ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 			.early_channel_count	= f1x_early_channel_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 			.dbam_to_cs		= f15_m60h_dbam_to_chip_select,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 	[F16_CPUS] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 		.ctl_name = "F16h",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 		.f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 		.f2_id = PCI_DEVICE_ID_AMD_16H_NB_F2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 		.max_mcs = 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 		.ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 			.early_channel_count	= f1x_early_channel_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 			.dbam_to_cs		= f16_dbam_to_chip_select,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 	[F16_M30H_CPUS] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 		.ctl_name = "F16h_M30h",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 		.f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 		.f2_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 		.max_mcs = 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 		.ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 			.early_channel_count	= f1x_early_channel_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 			.dbam_to_cs		= f16_dbam_to_chip_select,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 	[F17_CPUS] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 		.ctl_name = "F17h",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 		.f0_id = PCI_DEVICE_ID_AMD_17H_DF_F0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 		.f6_id = PCI_DEVICE_ID_AMD_17H_DF_F6,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 		.max_mcs = 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 		.ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 			.early_channel_count	= f17_early_channel_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 			.dbam_to_cs		= f17_addr_mask_to_cs_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 	[F17_M10H_CPUS] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 		.ctl_name = "F17h_M10h",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 		.f0_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 		.f6_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F6,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 		.max_mcs = 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 		.ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 			.early_channel_count	= f17_early_channel_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 			.dbam_to_cs		= f17_addr_mask_to_cs_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 	[F17_M30H_CPUS] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 		.ctl_name = "F17h_M30h",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 		.f0_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 		.f6_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F6,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 		.max_mcs = 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 		.ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 			.early_channel_count	= f17_early_channel_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 			.dbam_to_cs		= f17_addr_mask_to_cs_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 	[F17_M60H_CPUS] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 		.ctl_name = "F17h_M60h",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 		.f0_id = PCI_DEVICE_ID_AMD_17H_M60H_DF_F0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 		.f6_id = PCI_DEVICE_ID_AMD_17H_M60H_DF_F6,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 		.max_mcs = 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 		.ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 			.early_channel_count	= f17_early_channel_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 			.dbam_to_cs		= f17_addr_mask_to_cs_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 	[F17_M70H_CPUS] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 		.ctl_name = "F17h_M70h",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 		.f0_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 		.f6_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F6,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 		.max_mcs = 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 		.ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 			.early_channel_count	= f17_early_channel_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 			.dbam_to_cs		= f17_addr_mask_to_cs_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 	[F19_CPUS] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 		.ctl_name = "F19h",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 		.f0_id = PCI_DEVICE_ID_AMD_19H_DF_F0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 		.f6_id = PCI_DEVICE_ID_AMD_19H_DF_F6,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 		.max_mcs = 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 		.ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 			.early_channel_count	= f17_early_channel_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 			.dbam_to_cs		= f17_addr_mask_to_cs_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377)  * These are tables of eigenvectors (one per line) which can be used for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378)  * construction of the syndrome tables. The modified syndrome search algorithm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379)  * uses those to find the symbol in error and thus the DIMM.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381)  * Algorithm courtesy of Ross LaFetra from AMD.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) static const u16 x4_vectors[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) 	0x2f57, 0x1afe, 0x66cc, 0xdd88,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) 	0x11eb, 0x3396, 0x7f4c, 0xeac8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) 	0x0001, 0x0002, 0x0004, 0x0008,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) 	0x1013, 0x3032, 0x4044, 0x8088,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) 	0x106b, 0x30d6, 0x70fc, 0xe0a8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) 	0x4857, 0xc4fe, 0x13cc, 0x3288,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) 	0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) 	0x1f39, 0x251e, 0xbd6c, 0x6bd8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) 	0x15c1, 0x2a42, 0x89ac, 0x4758,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) 	0x2b03, 0x1602, 0x4f0c, 0xca08,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) 	0x1f07, 0x3a0e, 0x6b04, 0xbd08,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) 	0x8ba7, 0x465e, 0x244c, 0x1cc8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) 	0x2b87, 0x164e, 0x642c, 0xdc18,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) 	0x40b9, 0x80de, 0x1094, 0x20e8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 	0x27db, 0x1eb6, 0x9dac, 0x7b58,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 	0x11c1, 0x2242, 0x84ac, 0x4c58,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) 	0x1be5, 0x2d7a, 0x5e34, 0xa718,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 	0x4b39, 0x8d1e, 0x14b4, 0x28d8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 	0x4c97, 0xc87e, 0x11fc, 0x33a8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) 	0x8e97, 0x497e, 0x2ffc, 0x1aa8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 	0x16b3, 0x3d62, 0x4f34, 0x8518,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 	0x1e2f, 0x391a, 0x5cac, 0xf858,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 	0x1d9f, 0x3b7a, 0x572c, 0xfe18,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 	0x15f5, 0x2a5a, 0x5264, 0xa3b8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 	0x1dbb, 0x3b66, 0x715c, 0xe3f8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 	0x4397, 0xc27e, 0x17fc, 0x3ea8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 	0x1617, 0x3d3e, 0x6464, 0xb8b8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 	0x23ff, 0x12aa, 0xab6c, 0x56d8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 	0x2dfb, 0x1ba6, 0x913c, 0x7328,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 	0x185d, 0x2ca6, 0x7914, 0x9e28,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 	0x171b, 0x3e36, 0x7d7c, 0xebe8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 	0x4199, 0x82ee, 0x19f4, 0x2e58,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 	0x4807, 0xc40e, 0x130c, 0x3208,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) 	0x1905, 0x2e0a, 0x5804, 0xac08,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 	0x213f, 0x132a, 0xadfc, 0x5ba8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) 	0x19a9, 0x2efe, 0xb5cc, 0x6f88,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) static const u16 x8_vectors[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 	0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 	0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 	0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) 	0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 	0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 	0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) 	0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 	0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 	0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) 	0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 	0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 	0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) 	0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 	0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 	0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 	0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 	0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 	0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 	0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) static int decode_syndrome(u16 syndrome, const u16 *vectors, unsigned int num_vecs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 			   unsigned int v_dim)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 	unsigned int i, err_sym;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 	for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 		u16 s = syndrome;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 		unsigned int v_idx =  err_sym * v_dim;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 		unsigned int v_end = (err_sym + 1) * v_dim;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 		/* walk over all 16 bits of the syndrome */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) 		for (i = 1; i < (1U << 16); i <<= 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) 			/* if bit is set in that eigenvector... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) 			if (v_idx < v_end && vectors[v_idx] & i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) 				u16 ev_comp = vectors[v_idx++];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 				/* ... and bit set in the modified syndrome, */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) 				if (s & i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 					/* remove it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) 					s ^= ev_comp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) 					if (!s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 						return err_sym;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 			} else if (s & i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) 				/* can't get to zero, move to next symbol */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) 	edac_dbg(0, "syndrome(%x) not found\n", syndrome);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) 	return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) static int map_err_sym_to_channel(int err_sym, int sym_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) 	if (sym_size == 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) 		switch (err_sym) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) 		case 0x20:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) 		case 0x21:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) 		case 0x22:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) 		case 0x23:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) 			return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) 			return err_sym >> 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) 	/* x8 symbols */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) 		switch (err_sym) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) 		/* imaginary bits not in a DIMM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) 		case 0x10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) 			WARN(1, "Invalid error symbol: 0x%x\n", err_sym);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) 			return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) 		case 0x11:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) 		case 0x12:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) 			return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 			return err_sym >> 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) 	return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) 	struct amd64_pvt *pvt = mci->pvt_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) 	int err_sym = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) 	if (pvt->ecc_sym_sz == 8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) 		err_sym = decode_syndrome(syndrome, x8_vectors,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) 					  ARRAY_SIZE(x8_vectors),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) 					  pvt->ecc_sym_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) 	} else if (pvt->ecc_sym_sz == 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) 		err_sym = decode_syndrome(syndrome, x4_vectors,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) 					  ARRAY_SIZE(x4_vectors),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) 					  pvt->ecc_sym_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) 		amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) 		return err_sym;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) 	return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) }
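
/*
 * Worked example (hypothetical syndrome, not captured from hardware): with
 * x4 symbols, syndrome 0x0003 cannot be cancelled by the first two rows of
 * x4_vectors, but the identity row { 0x0001, 0x0002, 0x0004, 0x0008 } zeroes
 * it, so decode_syndrome() returns err_sym = 2; map_err_sym_to_channel(2, 4)
 * then takes the default branch and yields channel 2 >> 4 = 0.
 */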
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) static void __log_ecc_error(struct mem_ctl_info *mci, struct err_info *err,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) 			    u8 ecc_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 	enum hw_event_mc_err_type err_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) 	const char *string;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) 	if (ecc_type == 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) 		err_type = HW_EVENT_ERR_CORRECTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) 	else if (ecc_type == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 		err_type = HW_EVENT_ERR_UNCORRECTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 	else if (ecc_type == 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 		err_type = HW_EVENT_ERR_DEFERRED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 	else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 		WARN(1, "Unknown ECC type: %u\n", ecc_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) 	switch (err->err_code) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) 	case DECODE_OK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) 		string = "";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) 	case ERR_NODE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) 		string = "Failed to map error addr to a node";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) 	case ERR_CSROW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) 		string = "Failed to map error addr to a csrow";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) 	case ERR_CHANNEL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) 		string = "Unknown syndrome - possible error reporting race";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) 	case ERR_SYND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) 		string = "MCA_SYND not valid - unknown syndrome and csrow";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) 	case ERR_NORM_ADDR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 		string = "Cannot decode normalized address";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) 		string = "Unknown error code";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) 	edac_mc_handle_error(err_type, mci, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) 			     err->page, err->offset, err->syndrome,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) 			     err->csrow, err->channel, -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) 			     string, "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) static inline void decode_bus_error(int node_id, struct mce *m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) 	struct mem_ctl_info *mci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) 	struct amd64_pvt *pvt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) 	u8 ecc_type = (m->status >> 45) & 0x3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) 	u8 xec = XEC(m->status, 0x1f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) 	u16 ec = EC(m->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) 	u64 sys_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) 	struct err_info err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) 	mci = edac_mc_find(node_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) 	if (!mci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) 	pvt = mci->pvt_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) 	/* Bail out early if this was an 'observed' error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) 	if (PP(ec) == NBSL_PP_OBS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) 	/* Do only ECC errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) 	if (xec && xec != F10_NBSL_EXT_ERR_ECC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) 	memset(&err, 0, sizeof(err));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) 	sys_addr = get_error_address(pvt, m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) 	if (ecc_type == 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 		err.syndrome = extract_syndrome(m->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) 	pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) 	__log_ecc_error(mci, &err, ecc_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624)  * To find the UMC channel represented by this bank we need to match on its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625)  * instance_id. The instance_id of a bank is held in the lower 32 bits of its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626)  * IPID.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628)  * Currently, we can derive the channel number by looking at the 6th nibble in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629)  * the instance_id. For example, instance_id=0xYXXXXX where Y is the channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630)  * number.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) static int find_umc_channel(struct mce *m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) 	return (m->ipid & GENMASK(31, 0)) >> 20;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) }
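
/*
 * Illustrative example (hypothetical IPID value): for IPID = 0x9600250f00,
 * the lower 32 bits are 0x00250f00; shifting right by 20 isolates the 6th
 * nibble and gives UMC channel 0x2.
 */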
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) static void decode_umc_error(int node_id, struct mce *m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) 	u8 ecc_type = (m->status >> 45) & 0x3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) 	struct mem_ctl_info *mci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) 	struct amd64_pvt *pvt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) 	struct err_info err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) 	u64 sys_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) 	mci = edac_mc_find(node_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) 	if (!mci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) 	pvt = mci->pvt_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) 	memset(&err, 0, sizeof(err));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) 	if (m->status & MCI_STATUS_DEFERRED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) 		ecc_type = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) 	err.channel = find_umc_channel(m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) 	if (!(m->status & MCI_STATUS_SYNDV)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) 		err.err_code = ERR_SYND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) 		goto log_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) 	if (ecc_type == 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) 		u8 length = (m->synd >> 18) & 0x3f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) 		if (length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) 			err.syndrome = (m->synd >> 32) & GENMASK(length - 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) 			err.err_code = ERR_CHANNEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) 	err.csrow = m->synd & 0x7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) 	if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) 		err.err_code = ERR_NORM_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) 		goto log_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) 	error_address_to_page_and_offset(sys_addr, &err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) log_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) 	__log_ecc_error(mci, &err, ecc_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) }
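
/*
 * Example of the syndrome field layout above (hypothetical register value):
 * for m->synd = 0x0000beef00400000, the length field in bits [23:18] reads
 * 0x10 = 16, so the syndrome extracted from bits [47:32] is 0xbeef and the
 * chip select in bits [2:0] is 0.
 */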
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686)  * Use pvt->F3 which contains the F3 CPU PCI device to get the related
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687)  * F1 (AddrMap) and F2 (Dct) devices. Return negative value on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688)  * Reserve F0 and F6 on systems with a UMC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) 	if (pvt->umc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) 		pvt->F0 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) 		if (!pvt->F0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) 			amd64_err("F0 not found, device 0x%x (broken BIOS?)\n", pci_id1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) 			return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) 		pvt->F6 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) 		if (!pvt->F6) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) 			pci_dev_put(pvt->F0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) 			pvt->F0 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) 			amd64_err("F6 not found: device 0x%x (broken BIOS?)\n", pci_id2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) 			return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) 		if (!pci_ctl_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) 			pci_ctl_dev = &pvt->F0->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) 		edac_dbg(1, "F0: %s\n", pci_name(pvt->F0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) 		edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) 		edac_dbg(1, "F6: %s\n", pci_name(pvt->F6));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) 	/* Reserve the ADDRESS MAP Device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) 	pvt->F1 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) 	if (!pvt->F1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) 		amd64_err("F1 not found: device 0x%x (broken BIOS?)\n", pci_id1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) 	/* Reserve the DCT Device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) 	pvt->F2 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) 	if (!pvt->F2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) 		pci_dev_put(pvt->F1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) 		pvt->F1 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) 		amd64_err("F2 not found: device 0x%x (broken BIOS?)\n", pci_id2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) 	if (!pci_ctl_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) 		pci_ctl_dev = &pvt->F2->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) 	edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) 	edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) 	edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) }
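
/*
 * For orientation: a node's northbridge/DF functions share one PCI slot
 * (device 0x18 + node id on bus 0), so node 0 typically enumerates as
 * 00:18.0 (F0) through 00:18.7 (F7) and pci_get_related_function() simply
 * finds the sibling function of pvt->F3 by device ID. The addresses are
 * illustrative.
 */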
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) static void free_mc_sibling_devs(struct amd64_pvt *pvt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) 	if (pvt->umc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) 		pci_dev_put(pvt->F0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) 		pci_dev_put(pvt->F6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) 		pci_dev_put(pvt->F1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) 		pci_dev_put(pvt->F2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) static void determine_ecc_sym_sz(struct amd64_pvt *pvt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) 	pvt->ecc_sym_sz = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) 	if (pvt->umc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) 		u8 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) 		for_each_umc(i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) 			/* Check enabled channels only: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) 			if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) 				if (pvt->umc[i].ecc_ctrl & BIT(9)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) 					pvt->ecc_sym_sz = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) 					return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) 				} else if (pvt->umc[i].ecc_ctrl & BIT(7)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) 					pvt->ecc_sym_sz = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) 					return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) 	} else if (pvt->fam >= 0x10) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) 		u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) 		amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) 		/* F16h has only DCT0, so no need to read dbam1. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) 		if (pvt->fam != 0x16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) 			amd64_read_dct_pci_cfg(pvt, 1, DBAM0, &pvt->dbam1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) 		/* F10h, revD and later can do x8 ECC too. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) 		if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) 			pvt->ecc_sym_sz = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) }
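
/*
 * Summary of the checks above: in a UMC's ecc_ctrl register, BIT(9) selects
 * x16 symbols and BIT(7) selects x8, with x4 as the default. On the legacy
 * side, BIT(25) of EXT_NB_MCA_CFG advertises x8 capability on F10h revD and
 * later.
 */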
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791)  * Retrieve the hardware registers of the memory controller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) static void __read_mc_regs_df(struct amd64_pvt *pvt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) 	u8 nid = pvt->mc_node_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) 	struct amd64_umc *umc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) 	u32 i, umc_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) 	/* Read registers from each UMC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) 	for_each_umc(i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) 		umc_base = get_umc_base(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) 		umc = &pvt->umc[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) 		amd_smn_read(nid, umc_base + UMCCH_DIMM_CFG, &umc->dimm_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) 		amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) 		amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) 		amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) 		amd_smn_read(nid, umc_base + UMCCH_UMC_CAP_HI, &umc->umc_cap_hi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) }
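
/*
 * The SMN layout assumed here (see get_umc_base() in amd64_edac.h) places
 * channel i's UMC register block at 0x50000 + (i << 20), so the per-channel
 * reads above land 1 MiB apart in SMN space.
 */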
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814)  * Retrieve the hardware registers of the memory controller (this includes the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815)  * 'Address Map' and 'Misc' device regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) static void read_mc_regs(struct amd64_pvt *pvt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) 	unsigned int range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) 	u64 msr_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) 	 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) 	 * those are Read-As-Zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) 	rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) 	edac_dbg(0, "  TOP_MEM:  0x%016llx\n", pvt->top_mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) 	/* Check first whether TOP_MEM2 is enabled: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) 	rdmsrl(MSR_K8_SYSCFG, msr_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) 	if (msr_val & BIT(21)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) 		rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) 		edac_dbg(0, "  TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) 		edac_dbg(0, "  TOP_MEM2 disabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) 	if (pvt->umc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) 		__read_mc_regs_df(pvt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) 		amd64_read_pci_cfg(pvt->F0, DF_DHAR, &pvt->dhar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) 		goto skip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) 	amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) 	read_dram_ctl_register(pvt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) 	for (range = 0; range < DRAM_RANGES; range++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) 		u8 rw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) 		/* read settings for this DRAM range */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) 		read_dram_base_limit_regs(pvt, range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) 		rw = dram_rw(pvt, range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) 		if (!rw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) 		edac_dbg(1, "  DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) 			 range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) 			 get_dram_base(pvt, range),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) 			 get_dram_limit(pvt, range));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) 		edac_dbg(1, "   IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) 			 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) 			 (rw & 0x1) ? "R" : "-",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) 			 (rw & 0x2) ? "W" : "-",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) 			 dram_intlv_sel(pvt, range),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) 			 dram_dst_node(pvt, range));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) 	amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) 	amd64_read_dct_pci_cfg(pvt, 0, DBAM0, &pvt->dbam0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) 	amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) 	amd64_read_dct_pci_cfg(pvt, 0, DCLR0, &pvt->dclr0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) 	amd64_read_dct_pci_cfg(pvt, 0, DCHR0, &pvt->dchr0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) 	if (!dct_ganging_enabled(pvt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) 		amd64_read_dct_pci_cfg(pvt, 1, DCLR0, &pvt->dclr1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) 		amd64_read_dct_pci_cfg(pvt, 1, DCHR0, &pvt->dchr1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) skip:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) 	read_dct_base_mask(pvt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) 	determine_memory_type(pvt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) 	edac_dbg(1, "  DIMM type: %s\n", edac_mem_types[pvt->dram_type]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) 	determine_ecc_sym_sz(pvt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895)  * NOTE: CPU Revision Dependent code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897)  * Input:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898)  *	@csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899)  *	K8 private pointer to:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900)  *			DRAM Bank Address mapping register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901)  *			node_id
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902)  *			DCL register, where dual_channel_active is stored
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904)  * The DBAM register consists of four 4-bit fields, one per CSROW pair:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906)  * Bits:	CSROWs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907)  * 0-3		CSROWs 0 and 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908)  * 4-7		CSROWs 2 and 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909)  * 8-11		CSROWs 4 and 5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910)  * 12-15	CSROWs 6 and 7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912)  * Each field holds a value from 0 to 15. The meaning of the value depends
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913)  * on CPU revision and dual-channel state; see the relevant BKDG for more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914)  * information.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916)  * The memory controller provides for a total of only 8 CSROWs in its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917)  * current architecture. Each "pair" of CSROWs normally represents one DIMM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918)  * in single-channel mode or two DIMMs in dual-channel mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920)  * The following code logic collapses the various tables for CSROW based on CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921)  * revision.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923)  * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924)  *	The number of PAGE_SIZE pages the specified CSROW encompasses.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr_orig)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) 	u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) 	int csrow_nr = csrow_nr_orig;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) 	u32 cs_mode, nr_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) 	if (!pvt->umc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) 		csrow_nr >>= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) 		cs_mode = DBAM_DIMM(csrow_nr, dbam);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) 		cs_mode = f17_get_cs_mode(csrow_nr >> 1, dct, pvt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) 	nr_pages   = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, csrow_nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) 	nr_pages <<= 20 - PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) 	edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) 		    csrow_nr_orig, dct,  cs_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) 	edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) 	return nr_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) }
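
/*
 * Arithmetic sanity check (hypothetical values): if dbam_to_cs() reports a
 * 2048 MiB chip select and PAGE_SHIFT is 12 (4 KiB pages), nr_pages becomes
 * 2048 << (20 - 12) = 524288, i.e. 2^31 bytes / 2^12 bytes per page.
 */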
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) static int init_csrows_df(struct mem_ctl_info *mci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) 	struct amd64_pvt *pvt = mci->pvt_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) 	enum edac_type edac_mode = EDAC_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) 	enum dev_type dev_type = DEV_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) 	struct dimm_info *dimm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) 	int empty = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) 	u8 umc, cs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) 	if (mci->edac_ctl_cap & EDAC_FLAG_S16ECD16ED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) 		edac_mode = EDAC_S16ECD16ED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) 		dev_type = DEV_X16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) 	} else if (mci->edac_ctl_cap & EDAC_FLAG_S8ECD8ED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) 		edac_mode = EDAC_S8ECD8ED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) 		dev_type = DEV_X8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) 	} else if (mci->edac_ctl_cap & EDAC_FLAG_S4ECD4ED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) 		edac_mode = EDAC_S4ECD4ED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) 		dev_type = DEV_X4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) 	} else if (mci->edac_ctl_cap & EDAC_FLAG_SECDED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) 		edac_mode = EDAC_SECDED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) 	for_each_umc(umc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) 		for_each_chip_select(cs, umc, pvt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) 			if (!csrow_enabled(cs, umc, pvt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) 			empty = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) 			dimm = mci->csrows[cs]->channels[umc]->dimm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) 			edac_dbg(1, "MC node: %d, csrow: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) 					pvt->mc_node_id, cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) 			dimm->nr_pages = get_csrow_nr_pages(pvt, umc, cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) 			dimm->mtype = pvt->dram_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) 			dimm->edac_mode = edac_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) 			dimm->dtype = dev_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) 			dimm->grain = 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) 	return empty;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996)  * Initialize the array of csrow attribute instances, based on the values
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997)  * read from the hardware's PCI config registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) static int init_csrows(struct mem_ctl_info *mci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) 	struct amd64_pvt *pvt = mci->pvt_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) 	enum edac_type edac_mode = EDAC_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) 	struct csrow_info *csrow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) 	struct dimm_info *dimm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) 	int i, j, empty = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) 	int nr_pages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) 	if (pvt->umc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) 		return init_csrows_df(mci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) 	amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) 	pvt->nbcfg = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) 	edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) 		 pvt->mc_node_id, val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) 		 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) 	 * We iterate over DCT0 here but we look at DCT1 in parallel, if needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) 	for_each_chip_select(i, 0, pvt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) 		bool row_dct0 = !!csrow_enabled(i, 0, pvt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) 		bool row_dct1 = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) 		if (pvt->fam != 0xf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) 			row_dct1 = !!csrow_enabled(i, 1, pvt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) 		if (!row_dct0 && !row_dct1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) 		csrow = mci->csrows[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) 		empty = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) 		edac_dbg(1, "MC node: %d, csrow: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) 			    pvt->mc_node_id, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) 		if (row_dct0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) 			nr_pages = get_csrow_nr_pages(pvt, 0, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) 			csrow->channels[0]->dimm->nr_pages = nr_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) 		/* K8 has only one DCT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) 		if (pvt->fam != 0xf && row_dct1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) 			int row_dct1_pages = get_csrow_nr_pages(pvt, 1, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) 			csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) 			nr_pages += row_dct1_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) 		edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) 		/* Determine DIMM ECC mode: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) 		if (pvt->nbcfg & NBCFG_ECC_ENABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) 			edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) 					? EDAC_S4ECD4ED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) 					: EDAC_SECDED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) 		for (j = 0; j < pvt->channel_count; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) 			dimm = csrow->channels[j]->dimm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) 			dimm->mtype = pvt->dram_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) 			dimm->edac_mode = edac_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) 			dimm->grain = 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) 	return empty;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) /* get all cores on this DCT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) 	int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) 	for_each_online_cpu(cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) 		if (amd_get_nb_id(cpu) == nid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) 			cpumask_set_cpu(cpu, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) /* check MCG_CTL on all the cpus on this node */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) static bool nb_mce_bank_enabled_on_node(u16 nid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) 	cpumask_var_t mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) 	int cpu, nbe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) 	bool ret = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) 	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) 		amd64_warn("%s: Error allocating mask\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) 	get_cpus_on_this_dct_cpumask(mask, nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) 	rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) 	for_each_cpu(cpu, mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) 		struct msr *reg = per_cpu_ptr(msrs, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) 		nbe = reg->l & MSR_MCGCTL_NBE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) 		edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) 			 cpu, reg->q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) 			 (nbe ? "enabled" : "disabled"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) 		if (!nbe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) 	ret = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) 	free_cpumask_var(mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) 
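/*
 * Set or clear the NB machine check enable bit (MSR_MCGCTL_NBE) in MCG_CTL
 * on every core of node @nid. The pre-existing state is recorded in @s so
 * that switching reporting back off does not clobber a setting the BIOS
 * had already enabled.
 */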
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) 	cpumask_var_t cmask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) 	int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) 	if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) 		amd64_warn("%s: Error allocating mask\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) 	get_cpus_on_this_dct_cpumask(cmask, nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) 	rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) 	for_each_cpu(cpu, cmask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) 		struct msr *reg = per_cpu_ptr(msrs, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) 		if (on) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) 			if (reg->l & MSR_MCGCTL_NBE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) 				s->flags.nb_mce_enable = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) 			reg->l |= MSR_MCGCTL_NBE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) 			 * Turn off NB MCE reporting only when it was off before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) 			if (!s->flags.nb_mce_enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) 				reg->l &= ~MSR_MCGCTL_NBE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) 	wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) 	free_cpumask_var(cmask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) 
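/*
 * Enable NB MCE reporting and UECC/CECC error responses in NBCTL, and try
 * to force DRAM ECC on in NBCFG if the BIOS left it disabled. The previous
 * NBCTL bits are saved in @s for restore_ecc_error_reporting().
 */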
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) static bool enable_ecc_error_reporting(struct ecc_settings *s, u16 nid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) 				       struct pci_dev *F3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) 	bool ret = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) 	u32 value, mask = 0x3;		/* UECC/CECC enable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) 	if (toggle_ecc_err_reporting(s, nid, ON)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) 		amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) 	amd64_read_pci_cfg(F3, NBCTL, &value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) 	s->old_nbctl   = value & mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) 	s->nbctl_valid = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) 	value |= mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) 	amd64_write_pci_cfg(F3, NBCTL, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) 	amd64_read_pci_cfg(F3, NBCFG, &value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) 	edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) 		 nid, value, !!(value & NBCFG_ECC_ENABLE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) 	if (!(value & NBCFG_ECC_ENABLE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) 		amd64_warn("DRAM ECC disabled on this node, enabling...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) 		s->flags.nb_ecc_prev = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) 		/* Attempt to turn on DRAM ECC Enable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) 		value |= NBCFG_ECC_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) 		amd64_write_pci_cfg(F3, NBCFG, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) 		amd64_read_pci_cfg(F3, NBCFG, &value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) 		if (!(value & NBCFG_ECC_ENABLE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) 			amd64_warn("Hardware rejected DRAM ECC enable, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) 				   "check memory DIMM configuration.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) 			ret = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) 			amd64_info("Hardware accepted DRAM ECC Enable\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) 		s->flags.nb_ecc_prev = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) 	edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) 		 nid, value, !!(value & NBCFG_ECC_ENABLE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) 
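/* Undo the NBCTL/NBCFG/MCGCTL changes made by enable_ecc_error_reporting(). */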
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) 					struct pci_dev *F3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) 	u32 value, mask = 0x3;		/* UECC/CECC enable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) 	if (!s->nbctl_valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) 	amd64_read_pci_cfg(F3, NBCTL, &value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) 	value &= ~mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) 	value |= s->old_nbctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) 	amd64_write_pci_cfg(F3, NBCTL, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) 	/* Restore the BIOS's DRAM ECC "off" setting if we had force-enabled it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) 	if (!s->flags.nb_ecc_prev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) 		amd64_read_pci_cfg(F3, NBCFG, &value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) 		value &= ~NBCFG_ECC_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) 		amd64_write_pci_cfg(F3, NBCFG, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) 	/* restore the NB Enable MCGCTL bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) 	if (toggle_ecc_err_reporting(s, nid, OFF))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) 		amd64_warn("Error restoring NB MCGCTL settings!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) 
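/*
 * Check whether DRAM ECC reporting is usable on this node: on family 0x17
 * and later every enabled UMC must have ECC enabled; on older families
 * NBCFG must have DramEccEn set and the NB MCE bank must be enabled on all
 * cores of the node.
 */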
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) static bool ecc_enabled(struct amd64_pvt *pvt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) 	u16 nid = pvt->mc_node_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) 	bool nb_mce_en = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) 	u8 ecc_en = 0, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) 	u32 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) 	if (boot_cpu_data.x86 >= 0x17) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) 		u8 umc_en_mask = 0, ecc_en_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) 		struct amd64_umc *umc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) 		for_each_umc(i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) 			umc = &pvt->umc[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) 			/* Only check enabled UMCs. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) 			if (!(umc->sdp_ctrl & UMC_SDP_INIT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) 			umc_en_mask |= BIT(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) 			if (umc->umc_cap_hi & UMC_ECC_ENABLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) 				ecc_en_mask |= BIT(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) 		/* Check whether at least one UMC is enabled: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) 		if (umc_en_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) 			ecc_en = umc_en_mask == ecc_en_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) 			edac_dbg(0, "Node %d: No enabled UMCs.\n", nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) 		/* Assume UMC MCA banks are enabled. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) 		nb_mce_en = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) 		amd64_read_pci_cfg(pvt->F3, NBCFG, &value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) 		ecc_en = !!(value & NBCFG_ECC_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) 		nb_mce_en = nb_mce_bank_enabled_on_node(nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) 		if (!nb_mce_en)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) 			edac_dbg(0, "NB MCE bank disabled, set MSR 0x%08x[4] on node %d to enable.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) 				     MSR_IA32_MCG_CTL, nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) 	amd64_info("Node %d: DRAM ECC %s.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) 		   nid, (ecc_en ? "enabled" : "disabled"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) 	return ecc_en && nb_mce_en;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) 
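/*
 * Derive the EDAC control capabilities from the UMC registers: SECDED when
 * all enabled UMCs report ECC enabled, plus the chipkill flavor matching
 * the DIMMs' device width (x4, x8 or x16) when chipkill is supported.
 */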
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) 	u8 i, ecc_en = 1, cpk_en = 1, dev_x4 = 1, dev_x16 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) 	for_each_umc(i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) 		if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) 			ecc_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_ENABLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) 			cpk_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_CHIPKILL_CAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) 			dev_x4  &= !!(pvt->umc[i].dimm_cfg & BIT(6));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) 			dev_x16 &= !!(pvt->umc[i].dimm_cfg & BIT(7));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) 	/* Set chipkill only if ECC is enabled: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) 	if (ecc_en) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) 		mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) 		if (!cpk_en)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) 		if (dev_x4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) 			mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) 		else if (dev_x16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) 			mci->edac_ctl_cap |= EDAC_FLAG_S16ECD16ED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) 			mci->edac_ctl_cap |= EDAC_FLAG_S8ECD8ED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) 
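/*
 * Populate the remaining mem_ctl_info fields: capabilities, names and the
 * memory scrubber interface.
 */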
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) static void setup_mci_misc_attrs(struct mem_ctl_info *mci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) 	struct amd64_pvt *pvt = mci->pvt_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) 	mci->mtype_cap		= MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) 	mci->edac_ctl_cap	= EDAC_FLAG_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) 	if (pvt->umc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) 		f17h_determine_edac_ctl_cap(mci, pvt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) 		if (pvt->nbcap & NBCAP_SECDED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) 			mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) 		if (pvt->nbcap & NBCAP_CHIPKILL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) 			mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) 	mci->edac_cap		= determine_edac_cap(pvt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) 	mci->mod_name		= EDAC_MOD_STR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) 	mci->ctl_name		= fam_type->ctl_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) 	mci->dev_name		= pci_name(pvt->F3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) 	mci->ctl_page_to_phys	= NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) 	/* memory scrubber interface */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) 	mci->set_sdram_scrub_rate = set_scrub_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) 	mci->get_sdram_scrub_rate = get_scrub_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344)  * returns a pointer to the family descriptor on success, NULL otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) 	pvt->ext_model  = boot_cpu_data.x86_model >> 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) 	pvt->stepping	= boot_cpu_data.x86_stepping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) 	pvt->model	= boot_cpu_data.x86_model;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) 	pvt->fam	= boot_cpu_data.x86;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) 	switch (pvt->fam) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) 	case 0xf:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) 		fam_type	= &family_types[K8_CPUS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) 		pvt->ops	= &family_types[K8_CPUS].ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) 	case 0x10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) 		fam_type	= &family_types[F10_CPUS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) 		pvt->ops	= &family_types[F10_CPUS].ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) 	case 0x15:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) 		if (pvt->model == 0x30) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) 			fam_type = &family_types[F15_M30H_CPUS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) 			pvt->ops = &family_types[F15_M30H_CPUS].ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) 		} else if (pvt->model == 0x60) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) 			fam_type = &family_types[F15_M60H_CPUS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) 			pvt->ops = &family_types[F15_M60H_CPUS].ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) 		/* Richland (model 0x13) is a client-only part: not supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) 		} else if (pvt->model == 0x13) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) 			fam_type	= &family_types[F15_CPUS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) 			pvt->ops	= &family_types[F15_CPUS].ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) 	case 0x16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) 		if (pvt->model == 0x30) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) 			fam_type = &family_types[F16_M30H_CPUS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) 			pvt->ops = &family_types[F16_M30H_CPUS].ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) 		fam_type	= &family_types[F16_CPUS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) 		pvt->ops	= &family_types[F16_CPUS].ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) 	case 0x17:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) 		if (pvt->model >= 0x10 && pvt->model <= 0x2f) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) 			fam_type = &family_types[F17_M10H_CPUS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) 			pvt->ops = &family_types[F17_M10H_CPUS].ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) 		} else if (pvt->model >= 0x30 && pvt->model <= 0x3f) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) 			fam_type = &family_types[F17_M30H_CPUS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) 			pvt->ops = &family_types[F17_M30H_CPUS].ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) 		} else if (pvt->model >= 0x60 && pvt->model <= 0x6f) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) 			fam_type = &family_types[F17_M60H_CPUS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) 			pvt->ops = &family_types[F17_M60H_CPUS].ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) 		} else if (pvt->model >= 0x70 && pvt->model <= 0x7f) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) 			fam_type = &family_types[F17_M70H_CPUS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) 			pvt->ops = &family_types[F17_M70H_CPUS].ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) 	case 0x18:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) 		fam_type	= &family_types[F17_CPUS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) 		pvt->ops	= &family_types[F17_CPUS].ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) 		if (pvt->fam == 0x18)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) 			family_types[F17_CPUS].ctl_name = "F18h";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) 	case 0x19:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) 		if (pvt->model >= 0x20 && pvt->model <= 0x2f) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) 			fam_type = &family_types[F17_M70H_CPUS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) 			pvt->ops = &family_types[F17_M70H_CPUS].ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) 			fam_type->ctl_name = "F19h_M20h";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) 		fam_type	= &family_types[F19_CPUS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) 		pvt->ops	= &family_types[F19_CPUS].ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) 		family_types[F19_CPUS].ctl_name = "F19h";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) 		amd64_err("Unsupported family!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) 	amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) 		     (pvt->fam == 0xf ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) 				(pvt->ext_model >= K8_REV_F  ? "revF or later "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) 							     : "revE or earlier ")
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) 				 : ""), pvt->mc_node_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) 	return fam_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) static const struct attribute_group *amd64_edac_attr_groups[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) #ifdef CONFIG_EDAC_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) 	&amd64_edac_dbg_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) #ifdef CONFIG_EDAC_AMD64_ERROR_INJECTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) 	&amd64_edac_inj_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) 	NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) 
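/*
 * Reserve the PCI sibling devices of F3 (F0/F6 on family 0x17 and later,
 * F1/F2 before that) and cache the relevant hardware registers in @pvt.
 */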
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) static int hw_info_get(struct amd64_pvt *pvt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) 	u16 pci_id1, pci_id2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) 	if (pvt->fam >= 0x17) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) 		pvt->umc = kcalloc(fam_type->max_mcs, sizeof(struct amd64_umc), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) 		if (!pvt->umc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) 		pci_id1 = fam_type->f0_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) 		pci_id2 = fam_type->f6_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) 		pci_id1 = fam_type->f1_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) 		pci_id2 = fam_type->f2_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) 	ret = reserve_mc_sibling_devs(pvt, pci_id1, pci_id2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) 	read_mc_regs(pvt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) 
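/* Release the resources acquired by hw_info_get(). */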
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) static void hw_info_put(struct amd64_pvt *pvt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) 	if (pvt->F0 || pvt->F1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) 		free_mc_sibling_devs(pvt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) 	kfree(pvt->umc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) 
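/*
 * Allocate an mci structure for this node, fill it in and register it with
 * the EDAC core.
 */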
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) static int init_one_instance(struct amd64_pvt *pvt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) 	struct mem_ctl_info *mci = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) 	struct edac_mc_layer layers[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) 	int ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) 	 * We need to determine how many memory channels there are. Then use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) 	 * that information for calculating the size of the dynamic instance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) 	 * tables in the 'mci' structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) 	pvt->channel_count = pvt->ops->early_channel_count(pvt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) 	if (pvt->channel_count < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) 	ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) 	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) 	layers[0].size = pvt->csels[0].b_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) 	layers[0].is_virt_csrow = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) 	layers[1].type = EDAC_MC_LAYER_CHANNEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) 	 * Always allocate two channels since we can have setups with DIMMs on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) 	 * only one channel. Also, this simplifies handling later for the price
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) 	 * of a couple of KBs tops.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) 	layers[1].size = fam_type->max_mcs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) 	layers[1].is_virt_csrow = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) 	mci = edac_mc_alloc(pvt->mc_node_id, ARRAY_SIZE(layers), layers, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) 	if (!mci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) 	mci->pvt_info = pvt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) 	mci->pdev = &pvt->F3->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) 	setup_mci_misc_attrs(mci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) 	if (init_csrows(mci))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) 		mci->edac_cap = EDAC_FLAG_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) 	ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) 	if (edac_mc_add_mc_with_groups(mci, amd64_edac_attr_groups)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) 		edac_dbg(1, "failed edac_mc_add_mc()\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) 		edac_mc_free(mci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) 
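/* Check whether any chip select on any controller of this node is enabled. */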
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) static bool instance_has_memory(struct amd64_pvt *pvt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) 	bool cs_enabled = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) 	int cs = 0, dct = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) 	for (dct = 0; dct < fam_type->max_mcs; dct++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) 		for_each_chip_select(cs, dct, pvt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) 			cs_enabled |= csrow_enabled(cs, dct, pvt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) 	return cs_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) 
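/*
 * Probe one node: allocate the ECC settings and private data, identify the
 * CPU family, read the hardware state and, provided the node has DIMMs and
 * DRAM ECC is enabled (or successfully forced on), register an instance.
 */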
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) static int probe_one_instance(unsigned int nid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) 	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) 	struct amd64_pvt *pvt = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) 	struct ecc_settings *s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) 	ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) 	s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) 	if (!s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) 		goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) 	ecc_stngs[nid] = s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) 	pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) 	if (!pvt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) 		goto err_settings;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) 	pvt->mc_node_id	= nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) 	pvt->F3 = F3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) 	ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) 	fam_type = per_family_init(pvt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) 	if (!fam_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) 		goto err_enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) 	ret = hw_info_get(pvt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) 		goto err_enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) 	ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) 	if (!instance_has_memory(pvt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) 		amd64_info("Node %d: No DIMMs detected.\n", nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) 		goto err_enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) 	if (!ecc_enabled(pvt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) 		ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) 		if (!ecc_enable_override)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) 			goto err_enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) 		if (boot_cpu_data.x86 >= 0x17) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) 			amd64_warn("Forcing ECC on is not recommended on newer systems. Please enable ECC in BIOS.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) 			goto err_enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) 			amd64_warn("Forcing ECC on!\n");
		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) 		if (!enable_ecc_error_reporting(s, nid, F3))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) 			goto err_enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) 	ret = init_one_instance(pvt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) 		amd64_err("Error probing instance %d\n", nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) 		if (boot_cpu_data.x86 < 0x17)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) 			restore_ecc_error_reporting(s, nid, F3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) 		goto err_enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) 	dump_misc_regs(pvt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) err_enable:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) 	hw_info_put(pvt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) 	kfree(pvt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) err_settings:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) 	kfree(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) 	ecc_stngs[nid] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) 
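/* Tear down the EDAC instance of node @nid and free its resources. */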
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) static void remove_one_instance(unsigned int nid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) 	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) 	struct ecc_settings *s = ecc_stngs[nid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) 	struct mem_ctl_info *mci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) 	struct amd64_pvt *pvt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) 	/* Remove from EDAC CORE tracking list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) 	mci = edac_mc_del_mc(&F3->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) 	if (!mci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) 	pvt = mci->pvt_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) 	restore_ecc_error_reporting(s, nid, F3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) 	kfree(ecc_stngs[nid]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) 	ecc_stngs[nid] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) 	/* Free the EDAC CORE resources */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) 	mci->pvt_info = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) 	hw_info_put(pvt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) 	kfree(pvt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) 	edac_mc_free(mci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) 
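/* Register the single generic EDAC PCI control, if not already done. */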
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) static void setup_pci_device(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) 	if (pci_ctl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) 	pci_ctl = edac_pci_create_generic_ctl(pci_ctl_dev, EDAC_MOD_STR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) 	if (!pci_ctl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) 		pr_warn("%s(): Unable to create PCI control\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) 		pr_warn("%s(): PCI error report via EDAC not set\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) static const struct x86_cpu_id amd64_cpuids[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) 	X86_MATCH_VENDOR_FAM(AMD,	0x0F, NULL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) 	X86_MATCH_VENDOR_FAM(AMD,	0x10, NULL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) 	X86_MATCH_VENDOR_FAM(AMD,	0x15, NULL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) 	X86_MATCH_VENDOR_FAM(AMD,	0x16, NULL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) 	X86_MATCH_VENDOR_FAM(AMD,	0x17, NULL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) 	X86_MATCH_VENDOR_FAM(HYGON,	0x18, NULL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) 	X86_MATCH_VENDOR_FAM(AMD,	0x19, NULL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) 	{ }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) 
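/*
 * Module init: check CPU support and driver ownership, then probe an EDAC
 * instance on every northbridge and hook up the MCE decoder.
 */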
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) static int __init amd64_edac_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) 	const char *owner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) 	int err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) 	owner = edac_get_owner();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) 	if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) 	if (!x86_match_cpu(amd64_cpuids))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) 	if (amd_cache_northbridges() < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) 	opstate_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) 	err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) 	ecc_stngs = kcalloc(amd_nb_num(), sizeof(ecc_stngs[0]), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) 	if (!ecc_stngs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) 		goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) 	msrs = msrs_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) 	if (!msrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) 		goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) 	for (i = 0; i < amd_nb_num(); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) 		err = probe_one_instance(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) 			/* Unwind the instances probed so far */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) 			while (--i >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) 				remove_one_instance(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) 			goto err_pci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) 	if (!edac_has_mcs()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) 		err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) 		goto err_pci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) 	/* Register the family-appropriate error decoder with the MCE core */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) 	if (boot_cpu_data.x86 >= 0x17)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) 		amd_register_ecc_decoder(decode_umc_error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) 		amd_register_ecc_decoder(decode_bus_error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) 	setup_pci_device();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) #ifdef CONFIG_X86_32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) 	amd64_err("%s on 32-bit is unsupported. USE AT YOUR OWN RISK!\n", EDAC_MOD_STR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) 	pr_info("AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) err_pci:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) 	pci_ctl_dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) 	msrs_free(msrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) 	msrs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) err_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) 	kfree(ecc_stngs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) 	ecc_stngs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) 
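/* Module exit: undo amd64_edac_init() in reverse order. */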
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) static void __exit amd64_edac_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) 	if (pci_ctl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) 		edac_pci_release_generic_ctl(pci_ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) 	/* Unregister the error decoder from the MCE core */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) 	if (boot_cpu_data.x86 >= 0x17)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) 		amd_unregister_ecc_decoder(decode_umc_error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) 		amd_unregister_ecc_decoder(decode_bus_error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) 	for (i = 0; i < amd_nb_num(); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) 		remove_one_instance(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) 	kfree(ecc_stngs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) 	ecc_stngs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) 	pci_ctl_dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) 	msrs_free(msrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) 	msrs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) module_init(amd64_edac_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) module_exit(amd64_edac_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) 		"Dave Peterson, Thayne Harbaugh");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) 		EDAC_AMD64_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) module_param(edac_op_state, int, 0444);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");