Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * This file contains work-arounds for x86 and x86_64 platform bugs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5) #include <linux/dmi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7) #include <linux/irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) #include <asm/hpet.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) #include <asm/setup.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <asm/mce.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SMP) && defined(CONFIG_PCI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) 
/*
 * Disable software IRQ balancing/affinity on Intel E7520/E7320/E7525
 * chipsets (revision <= 0x9), where the BIOS may have enabled hardware
 * IRQ balancing instead; the two mechanisms must not be mixed.
 */
static void quirk_intel_irqbalance(struct pci_dev *dev)
{
	u8 config;
	u16 word;

	/* BIOS may enable hardware IRQ balancing for
	 * E7520/E7320/E7525(revision ID 0x9 and below)
	 * based platforms.
	 * Disable SW irqbalance/affinity on those platforms.
	 */
	if (dev->revision > 0x9)
		return;

	/*
	 * Enable access to config space: setting bit 1 at offset 0xf4
	 * un-hides device 8 (see below).  Remember the old value so it
	 * can be restored on exit.
	 */
	pci_read_config_byte(dev, 0xf4, &config);
	pci_write_config_byte(dev, 0xf4, config|0x2);

	/*
	 * read xTPR register.  We may not have a pci_dev for device 8
	 * because it might be hidden until the above write.
	 */
	pci_bus_read_config_word(dev->bus, PCI_DEVFN(8, 0), 0x4c, &word);

	/* Bit 13 clear => hardware IRQ balancing is active on this board. */
	if (!(word & (1 << 13))) {
		dev_info(&dev->dev, "Intel E7520/7320/7525 detected; "
			"disabling irq balancing and affinity\n");
		/* Side effect: also disables spurious-irq debugging. */
		noirqdebug_setup("");
#ifdef CONFIG_PROC_FS
		/* Keep userspace from changing IRQ affinity via /proc. */
		no_irq_affinity = 1;
#endif
	}

	/* put back the original value for config space*/
	if (!(config & 0x2))
		pci_write_config_byte(dev, 0xf4, config);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) 			quirk_intel_irqbalance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) 			quirk_intel_irqbalance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) 			quirk_intel_irqbalance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) #if defined(CONFIG_HPET_TIMER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) unsigned long force_hpet_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) static enum {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) 	NONE_FORCE_HPET_RESUME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) 	OLD_ICH_FORCE_HPET_RESUME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) 	ICH_FORCE_HPET_RESUME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) 	VT8237_FORCE_HPET_RESUME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) 	NVIDIA_FORCE_HPET_RESUME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) 	ATI_FORCE_HPET_RESUME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) } force_hpet_resume_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) static void __iomem *rcba_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) 
/*
 * Re-enable the HPET after resume on ICH-family chipsets: the BIOS may
 * clear the enable bit across suspend, so rewrite it using the RCBA
 * mapping cached by ich_force_enable_hpet().
 */
static void ich_force_hpet_resume(void)
{
	u32 val;

	/* Nothing to do unless the boot-time quirk force-enabled the HPET. */
	if (!force_hpet_address)
		return;

	/*
	 * rcba_base is only set (and intentionally left mapped) when
	 * force_hpet_resume_type == ICH_FORCE_HPET_RESUME was selected.
	 */
	BUG_ON(rcba_base == NULL);

	/* read the Function Disable register, dword mode only */
	val = readl(rcba_base + 0x3404);
	if (!(val & 0x80)) {
		/* HPET disabled in HPTC. Trying to enable */
		writel(val | 0x80, rcba_base + 0x3404);
	}

	/* Read back to verify the enable bit stuck; it worked at boot. */
	val = readl(rcba_base + 0x3404);
	if (!(val & 0x80))
		BUG();
	else
		printk(KERN_DEBUG "Force enabled HPET at resume\n");
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) 
/*
 * Force-enable the HPET on ICH-family chipsets whose BIOS left it
 * disabled (or enabled but unreported).  Locates the chipset's RCBA
 * MMIO window via config offset 0xF0, then flips the HPET enable bit
 * (bit 7) in the register at RCBA+0x3404 (called HPTC here) and derives
 * the HPET base address from its low two bits.
 */
static void ich_force_enable_hpet(struct pci_dev *dev)
{
	u32 val;
	u32 rcba;
	int err = 0;

	/* Bail if ACPI already reported an HPET or another quirk won. */
	if (hpet_address || force_hpet_address)
		return;

	/* RCBA register at 0xF0: base address in bits 31:14. */
	pci_read_config_dword(dev, 0xF0, &rcba);
	rcba &= 0xFFFFC000;
	if (rcba == 0) {
		dev_printk(KERN_DEBUG, &dev->dev, "RCBA disabled; "
			"cannot force enable HPET\n");
		return;
	}

	/* use bits 31:14, 16 kB aligned */
	rcba_base = ioremap(rcba, 0x4000);
	if (rcba_base == NULL) {
		dev_printk(KERN_DEBUG, &dev->dev, "ioremap failed; "
			"cannot force enable HPET\n");
		return;
	}

	/* read the Function Disable register, dword mode only */
	val = readl(rcba_base + 0x3404);

	if (val & 0x80) {
		/* HPET is enabled in HPTC. Just not reported by BIOS */
		/* Low 2 bits select one of four 4 kB slots at 0xFED00000. */
		val = val & 0x3;
		force_hpet_address = 0xFED00000 | (val << 12);
		dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
			"0x%lx\n", force_hpet_address);
		iounmap(rcba_base);
		return;
	}

	/* HPET disabled in HPTC. Trying to enable */
	writel(val | 0x80, rcba_base + 0x3404);

	/* Read back to check whether the enable bit stuck. */
	val = readl(rcba_base + 0x3404);
	if (!(val & 0x80)) {
		err = 1;
	} else {
		val = val & 0x3;
		force_hpet_address = 0xFED00000 | (val << 12);
	}

	if (err) {
		force_hpet_address = 0;
		iounmap(rcba_base);
		dev_printk(KERN_DEBUG, &dev->dev,
			"Failed to force enable HPET\n");
	} else {
		/*
		 * Success: keep rcba_base mapped so ich_force_hpet_resume()
		 * can redo the enable after suspend/resume.
		 */
		force_hpet_resume_type = ICH_FORCE_HPET_RESUME;
		dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
			"0x%lx\n", force_hpet_address);
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 			 ich_force_enable_hpet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 			 ich_force_enable_hpet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 			 ich_force_enable_hpet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) 			 ich_force_enable_hpet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) 			 ich_force_enable_hpet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) 			 ich_force_enable_hpet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) 			 ich_force_enable_hpet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) 			 ich_force_enable_hpet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 			 ich_force_enable_hpet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x3a16,	/* ICH10 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 			 ich_force_enable_hpet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) static struct pci_dev *cached_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) static void hpet_print_force_info(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) 	printk(KERN_INFO "HPET not enabled in BIOS. "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) 	       "You might try hpet=force boot option\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) 
/*
 * Resume hook for older ICH chipsets: rewrite the GEN_CNTL HPET field
 * (config offset 0xD0, bits 17:15) on the cached LPC device, since the
 * BIOS may clear it across suspend.
 */
static void old_ich_force_hpet_resume(void)
{
	u32 val;
	u32 gen_cntl;

	/* Only act if old_ich_force_enable_hpet() succeeded at boot. */
	if (!force_hpet_address || !cached_dev)
		return;

	/* Set field to 0x4: bit 17 = enable, bits 16:15 = base select 0. */
	pci_read_config_dword(cached_dev, 0xD0, &gen_cntl);
	gen_cntl &= (~(0x7 << 15));
	gen_cntl |= (0x4 << 15);

	pci_write_config_dword(cached_dev, 0xD0, gen_cntl);
	/* Read back and verify the enable bit stuck; it worked at boot. */
	pci_read_config_dword(cached_dev, 0xD0, &gen_cntl);
	val = gen_cntl >> 15;
	val &= 0x7;
	if (val == 0x4)
		printk(KERN_DEBUG "Force enabled HPET at resume\n");
	else
		BUG();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) 
/*
 * Force-enable the HPET on older ICH chipsets via the GEN_CNTL register
 * at config offset 0xD0: bit 17 enables the HPET, bits 16:15 select one
 * of four 4 kB base-address slots at 0xFED00000.
 */
static void old_ich_force_enable_hpet(struct pci_dev *dev)
{
	u32 val;
	u32 gen_cntl;

	/* Bail if ACPI already reported an HPET or another quirk won. */
	if (hpet_address || force_hpet_address)
		return;

	pci_read_config_dword(dev, 0xD0, &gen_cntl);
	/*
	 * Bit 17 is HPET enable bit.
	 * Bit 16:15 control the HPET base address.
	 */
	val = gen_cntl >> 15;
	val &= 0x7;
	if (val & 0x4) {
		/* Already enabled, just not reported: derive base and stop. */
		val &= 0x3;
		force_hpet_address = 0xFED00000 | (val << 12);
		dev_printk(KERN_DEBUG, &dev->dev, "HPET at 0x%lx\n",
			force_hpet_address);
		return;
	}

	/*
	 * HPET is disabled. Trying enabling at FED00000 and check
	 * whether it sticks
	 */
	gen_cntl &= (~(0x7 << 15));
	gen_cntl |= (0x4 << 15);
	pci_write_config_dword(dev, 0xD0, gen_cntl);

	pci_read_config_dword(dev, 0xD0, &gen_cntl);

	val = gen_cntl >> 15;
	val &= 0x7;
	if (val & 0x4) {
		/* HPET is enabled in HPTC. Just not reported by BIOS */
		val &= 0x3;
		force_hpet_address = 0xFED00000 | (val << 12);
		dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
			"0x%lx\n", force_hpet_address);
		/* Cache the device so the resume hook can redo the enable. */
		cached_dev = dev;
		force_hpet_resume_type = OLD_ICH_FORCE_HPET_RESUME;
		return;
	}

	dev_printk(KERN_DEBUG, &dev->dev, "Failed to force enable HPET\n");
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258)  * Undocumented chipset features. Make sure that the user enforced
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259)  * this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) static void old_ich_force_enable_hpet_user(struct pci_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) 	if (hpet_force_user)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) 		old_ich_force_enable_hpet(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) 			 old_ich_force_enable_hpet_user);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) 			 old_ich_force_enable_hpet_user);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) 			 old_ich_force_enable_hpet_user);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) 			 old_ich_force_enable_hpet_user);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) 			 old_ich_force_enable_hpet_user);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) 			 old_ich_force_enable_hpet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_12,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) 			 old_ich_force_enable_hpet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) 
/*
 * Resume hook for VIA VT8237-class chipsets: rewrite the HPET control
 * register (config offset 0x68) on the cached device, re-enabling the
 * HPET at 0xfed00000 after the BIOS may have cleared it.
 */
static void vt8237_force_hpet_resume(void)
{
	u32 val;

	/* Only act if vt8237_force_enable_hpet() succeeded at boot. */
	if (!force_hpet_address || !cached_dev)
		return;

	/* Base 0xfed00000 in the high bits, bit 7 = HPET enable. */
	val = 0xfed00000 | 0x80;
	pci_write_config_dword(cached_dev, 0x68, val);

	/* Verify the enable bit stuck; it worked at boot, so BUG if not. */
	pci_read_config_dword(cached_dev, 0x68, &val);
	if (val & 0x80)
		printk(KERN_DEBUG "Force enabled HPET at resume\n");
	else
		BUG();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) 
/*
 * Force-enable the HPET on VIA VT8235/VT8237/CX700 south bridges via
 * the register at config offset 0x68.  Only runs when the user passed
 * hpet=force, since this is an undocumented chipset feature.
 */
static void vt8237_force_enable_hpet(struct pci_dev *dev)
{
	u32 val;

	/* Bail if ACPI already reported an HPET or another quirk won. */
	if (hpet_address || force_hpet_address)
		return;

	/* Undocumented feature: require explicit user opt-in. */
	if (!hpet_force_user) {
		hpet_print_force_info();
		return;
	}

	pci_read_config_dword(dev, 0x68, &val);
	/*
	 * Bit 7 is HPET enable bit.
	 * Bit 31:10 is HPET base address (contrary to what datasheet claims)
	 */
	if (val & 0x80) {
		/* Enabled, just not reported by the BIOS: record the base. */
		force_hpet_address = (val & ~0x3ff);
		dev_printk(KERN_DEBUG, &dev->dev, "HPET at 0x%lx\n",
			force_hpet_address);
		return;
	}

	/*
	 * HPET is disabled. Trying enabling at FED00000 and check
	 * whether it sticks
	 */
	val = 0xfed00000 | 0x80;
	pci_write_config_dword(dev, 0x68, val);

	pci_read_config_dword(dev, 0x68, &val);
	if (val & 0x80) {
		force_hpet_address = (val & ~0x3ff);
		dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
			"0x%lx\n", force_hpet_address);
		/* Cache the device so the resume hook can redo the enable. */
		cached_dev = dev;
		force_hpet_resume_type = VT8237_FORCE_HPET_RESUME;
		return;
	}

	dev_printk(KERN_DEBUG, &dev->dev, "Failed to force enable HPET\n");
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) 			 vt8237_force_enable_hpet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) 			 vt8237_force_enable_hpet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_CX700,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) 			 vt8237_force_enable_hpet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) 
/*
 * Resume hook for ATI IXP4x0: blindly re-program the HPET base address
 * (config offset 0x14) on the cached SMBus device.  No readback check
 * here, unlike the other resume hooks.
 */
static void ati_force_hpet_resume(void)
{
	pci_write_config_dword(cached_dev, 0x14, 0xfed00000);
	printk(KERN_DEBUG "Force enabled HPET at resume\n");
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) 
/*
 * Read the true silicon revision of an ATI IXP4x0 SMBus device.
 * The sequence of writes to offsets 0xac and 0x70 presumably unhides
 * the real revision ID in the class/revision register (offset 0x08) —
 * undocumented chipset behavior; exact semantics unverified.
 * Returns the low byte (revision); config-access errors are collected
 * and reported once via WARN_ON_ONCE.
 */
static u32 ati_ixp4x0_rev(struct pci_dev *dev)
{
	int err = 0;
	u32 d = 0;
	u8  b = 0;

	/* Clear bit 5 at 0xac, then set bit 8 at 0x70, in this order. */
	err = pci_read_config_byte(dev, 0xac, &b);
	b &= ~(1<<5);
	err |= pci_write_config_byte(dev, 0xac, b);
	err |= pci_read_config_dword(dev, 0x70, &d);
	d |= 1<<8;
	err |= pci_write_config_dword(dev, 0x70, d);
	/* Revision ID lives in the low byte of the register at 0x08. */
	err |= pci_read_config_dword(dev, 0x8, &d);
	d &= 0xff;
	dev_printk(KERN_DEBUG, &dev->dev, "SB4X0 revision 0x%x\n", d);

	WARN_ON_ONCE(err);

	return d;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) 
/*
 * Force-enable the HPET on ATI IXP400 chipsets (revision >= 0x82 only).
 * Programs the base address at config offset 0x14, enables the HPET
 * interrupt through the index/data port pair 0xcd6/0xcd7 (index 0x72),
 * and sets bit 10 at config offset 0x64.  Requires hpet=force.
 */
static void ati_force_enable_hpet(struct pci_dev *dev)
{
	u32 d, val;
	u8  b;

	/* Bail if ACPI already reported an HPET or another quirk won. */
	if (hpet_address || force_hpet_address)
		return;

	/* Undocumented feature: require explicit user opt-in. */
	if (!hpet_force_user) {
		hpet_print_force_info();
		return;
	}

	/* Older silicon than rev 0x82 is not handled by this quirk. */
	d = ati_ixp4x0_rev(dev);
	if (d  < 0x82)
		return;

	/* base address */
	pci_write_config_dword(dev, 0x14, 0xfed00000);
	/* Read back what actually latched; used as the final address. */
	pci_read_config_dword(dev, 0x14, &val);

	/* enable interrupt: index 0x72 via port 0xcd6, data via 0xcd7 */
	outb(0x72, 0xcd6); b = inb(0xcd7);
	b |= 0x1;
	outb(0x72, 0xcd6); outb(b, 0xcd7);
	/* Re-read to verify the bit stuck; give up silently if not. */
	outb(0x72, 0xcd6); b = inb(0xcd7);
	if (!(b & 0x1))
		return;
	/* Set bit 10 at 0x64 and verify it sticks as well. */
	pci_read_config_dword(dev, 0x64, &d);
	d |= (1<<10);
	pci_write_config_dword(dev, 0x64, d);
	pci_read_config_dword(dev, 0x64, &d);
	if (!(d & (1<<10)))
		return;

	force_hpet_address = val;
	force_hpet_resume_type = ATI_FORCE_HPET_RESUME;
	dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n",
		   force_hpet_address);
	/* Cache the device so the resume hook can re-program the base. */
	cached_dev = dev;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) 			 ati_force_enable_hpet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423)  * Undocumented chipset feature taken from LinuxBIOS.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424)  */
/*
 * Resume hook for NVIDIA bridges: blindly re-program the HPET register
 * (config offset 0x44) with base 0xfed00000 | enable bit 0 on the
 * cached device.  No readback check, matching the boot-time quirk.
 */
static void nvidia_force_hpet_resume(void)
{
	pci_write_config_dword(cached_dev, 0x44, 0xfed00001);
	printk(KERN_DEBUG "Force enabled HPET at resume\n");
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) 
/*
 * Force-enable the HPET on NVIDIA ISA/LPC bridges via the register at
 * config offset 0x44: base address with bit 0 as the enable flag.
 * Undocumented feature taken from LinuxBIOS; requires hpet=force.
 */
static void nvidia_force_enable_hpet(struct pci_dev *dev)
{
	u32 val;

	/* Bail if ACPI already reported an HPET or another quirk won. */
	if (hpet_address || force_hpet_address)
		return;

	/* Undocumented feature: require explicit user opt-in. */
	if (!hpet_force_user) {
		hpet_print_force_info();
		return;
	}

	/* Write base 0xfed00000 with bit 0 (enable) set, then read back. */
	pci_write_config_dword(dev, 0x44, 0xfed00001);
	pci_read_config_dword(dev, 0x44, &val);
	/* Mask off the enable bit to recover the address actually latched. */
	force_hpet_address = val & 0xfffffffe;
	force_hpet_resume_type = NVIDIA_FORCE_HPET_RESUME;
	dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n",
		force_hpet_address);
	/* Cache the device so the resume hook can redo the enable. */
	cached_dev = dev;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) 
/* nVidia bridge device ids that get the forced-HPET quirk. */

/* ISA Bridges */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0050,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0051,
			nvidia_force_enable_hpet);

/* LPC bridges */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0260,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0360,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0361,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0362,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0363,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0364,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0365,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0366,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0367,
			nvidia_force_enable_hpet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) void force_hpet_resume(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) 	switch (force_hpet_resume_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) 	case ICH_FORCE_HPET_RESUME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) 		ich_force_hpet_resume();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) 	case OLD_ICH_FORCE_HPET_RESUME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) 		old_ich_force_hpet_resume();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) 	case VT8237_FORCE_HPET_RESUME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) 		vt8237_force_hpet_resume();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) 	case NVIDIA_FORCE_HPET_RESUME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) 		nvidia_force_hpet_resume();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) 	case ATI_FORCE_HPET_RESUME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) 		ati_force_hpet_resume();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502)  * According to the datasheet e6xx systems have the HPET hardwired to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503)  * 0xfed00000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) static void e6xx_force_enable_hpet(struct pci_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) 	if (hpet_address || force_hpet_address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) 	force_hpet_address = 0xFED00000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) 	force_hpet_resume_type = NONE_FORCE_HPET_RESUME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) 	dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) 		"0x%lx\n", force_hpet_address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E6XX_CU,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) 			 e6xx_force_enable_hpet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519)  * HPET MSI on some boards (ATI SB700/SB800) has side effect on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520)  * floppy DMA. Disable HPET MSI on such platforms.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521)  * See erratum #27 (Misinterpreted MSI Requests May Result in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522)  * Corrupted LPC DMA Data) in AMD Publication #46837,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523)  * "SB700 Family Product Errata", Rev. 1.0, March 2010.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524)  */
static void force_disable_hpet_msi(struct pci_dev *unused)
{
	/* Erratum workaround (see comment above): globally turn off HPET MSI. */
	hpet_msi_disable = true;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
			 force_disable_hpet_msi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) #if defined(CONFIG_PCI) && defined(CONFIG_NUMA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) /* Set correct numa_node information for AMD NB functions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) static void quirk_amd_nb_node(struct pci_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) 	struct pci_dev *nb_ht;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) 	unsigned int devfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) 	u32 node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) 	devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) 	nb_ht = pci_get_slot(dev->bus, devfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) 	if (!nb_ht)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) 	pci_read_config_dword(nb_ht, 0x60, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) 	node = pcibus_to_node(dev->bus) | (val & 7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) 	 * Some hardware may return an invalid node ID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) 	 * so check it first:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) 	if (node_online(node))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) 		set_dev_node(&dev->dev, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) 	pci_dev_put(nb_ht);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) 
/* All AMD K8/10h/15h northbridge functions share the same node fixup. */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_HT,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MAP,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_DRAM,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_LINK,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F0,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F1,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F2,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F5,
			quirk_amd_nb_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) #ifdef CONFIG_PCI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595)  * Processor does not ensure DRAM scrub read/write sequence
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596)  * is atomic wrt accesses to CC6 save state area. Therefore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597)  * if a concurrent scrub read/write access is to same address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598)  * the entry may appear as if it is not written. This quirk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599)  * applies to Fam16h models 00h-0Fh
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601)  * See "Revision Guide" for AMD F16h models 00h-0fh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602)  * document 51810 rev. 3.04, Nov 2013
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) static void amd_disable_seq_and_redirect_scrub(struct pci_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) 	 * Suggested workaround:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) 	 * set D18F3x58[4:0] = 00h and set D18F3x5C[0] = 0b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) 	pci_read_config_dword(dev, 0x58, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) 	if (val & 0x1F) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) 		val &= ~(0x1F);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) 		pci_write_config_dword(dev, 0x58, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) 	pci_read_config_dword(dev, 0x5C, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) 	if (val & BIT(0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) 		val &= ~BIT(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) 		pci_write_config_dword(dev, 0x5c, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) 			amd_disable_seq_and_redirect_scrub);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) /* Ivy Bridge, Haswell, Broadwell */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) static void quirk_intel_brickland_xeon_ras_cap(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) 	u32 capid0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) 	pci_read_config_dword(pdev, 0x84, &capid0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) 	if (capid0 & 0x10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) 		enable_copy_mc_fragile();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) /* Skylake */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) static void quirk_intel_purley_xeon_ras_cap(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) 	u32 capid0, capid5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) 	pci_read_config_dword(pdev, 0x84, &capid0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) 	pci_read_config_dword(pdev, 0x98, &capid5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) 	 * CAPID0{7:6} indicate whether this is an advanced RAS SKU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) 	 * CAPID5{8:5} indicate that various NVDIMM usage modes are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) 	 * enabled, so memory machine check recovery is also enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) 	if ((capid0 & 0xc0) == 0xc0 || (capid5 & 0x1e0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) 		enable_copy_mc_fragile();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) }
/* Intel host devices whose capability registers gate the fragile copy. */
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x0ec3, quirk_intel_brickland_xeon_ras_cap);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, quirk_intel_brickland_xeon_ras_cap);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, quirk_intel_brickland_xeon_ras_cap);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2083, quirk_intel_purley_xeon_ras_cap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) bool x86_apple_machine;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) EXPORT_SYMBOL(x86_apple_machine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) void __init early_platform_quirks(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) 	x86_apple_machine = dmi_match(DMI_SYS_VENDOR, "Apple Inc.") ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) 			    dmi_match(DMI_SYS_VENDOR, "Apple Computer, Inc.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) }