// SPDX-License-Identifier: GPL-2.0-only
/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/pci_ids.h>
#include <asm/amd_nb.h>

#define PCI_DEVICE_ID_AMD_17H_ROOT	0x1450
#define PCI_DEVICE_ID_AMD_17H_M10H_ROOT	0x15d0
#define PCI_DEVICE_ID_AMD_17H_M30H_ROOT	0x1480
#define PCI_DEVICE_ID_AMD_17H_M60H_ROOT	0x1630
#define PCI_DEVICE_ID_AMD_17H_DF_F4	0x1464
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4 0x1494
#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F4 0x144c
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4 0x1444
#define PCI_DEVICE_ID_AMD_19H_DF_F4	0x1654

/* Protect the PCI config register pairs used for SMN and DF indirect access. */
static DEFINE_MUTEX(smn_mutex);

static u32 *flush_words;

static const struct pci_device_id amd_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_ROOT) },
	{}
};

#define PCI_DEVICE_ID_AMD_CNB17H_F4	0x1704

static const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
	{}
};

static const struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
	{}
};

static const struct pci_device_id hygon_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{}
};

static const struct pci_device_id hygon_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{}
};

static const struct pci_device_id hygon_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{}
};

const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

static struct amd_northbridge_info amd_northbridges;

u16 amd_nb_num(void)
{
	return amd_northbridges.num;
}
EXPORT_SYMBOL_GPL(amd_nb_num);

bool amd_nb_has_feature(unsigned int feature)
{
	return ((amd_northbridges.flags & feature) == feature);
}
EXPORT_SYMBOL_GPL(amd_nb_has_feature);

struct amd_northbridge *node_to_amd_nb(int node)
{
	return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
}
EXPORT_SYMBOL_GPL(node_to_amd_nb);

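/*
 * Scan the PCI bus for the next device matching @ids, starting the
 * search after @dev (or from the beginning when @dev is NULL).
 */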
static struct pci_dev *next_northbridge(struct pci_dev *dev,
					const struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}

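/*
 * SMN accesses go through an index/data register pair in the root
 * device's PCI config space: the SMN address is programmed at offset
 * 0x60 and the data is then read or written at offset 0x64.
 */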
static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write)
{
	struct pci_dev *root;
	int err = -ENODEV;

	if (node >= amd_northbridges.num)
		goto out;

	root = node_to_amd_nb(node)->root;
	if (!root)
		goto out;

	mutex_lock(&smn_mutex);

	err = pci_write_config_dword(root, 0x60, address);
	if (err) {
		pr_warn("Error programming SMN address 0x%x.\n", address);
		goto out_unlock;
	}

	err = (write ? pci_write_config_dword(root, 0x64, *value)
		     : pci_read_config_dword(root, 0x64, value));
	if (err)
		pr_warn("Error %s SMN address 0x%x.\n",
			(write ? "writing to" : "reading from"), address);

out_unlock:
	mutex_unlock(&smn_mutex);

out:
	return err;
}

int amd_smn_read(u16 node, u32 address, u32 *value)
{
	return __amd_smn_rw(node, address, value, false);
}
EXPORT_SYMBOL_GPL(amd_smn_read);

int amd_smn_write(u16 node, u32 address, u32 value)
{
	return __amd_smn_rw(node, address, &value, true);
}
EXPORT_SYMBOL_GPL(amd_smn_write);

/*
 * Data Fabric Indirect Access uses FICAA/FICAD.
 *
 * Fabric Indirect Configuration Access Address (FICAA): Constructed based
 * on the device's Instance Id and the PCI function and register offset of
 * the desired register.
 *
 * Fabric Indirect Configuration Access Data (FICAD): There are FICAD LO
 * and FICAD HI registers but so far we only need the LO register.
 */
int amd_df_indirect_read(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
{
	struct pci_dev *F4;
	u32 ficaa;
	int err = -ENODEV;

	if (node >= amd_northbridges.num)
		goto out;

	F4 = node_to_amd_nb(node)->link;
	if (!F4)
		goto out;

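	/*
	 * Build the FICAA value: bit 0 selects instance-ID based
	 * (indirect) access, bits [9:2] take the dword-aligned register
	 * offset, bits [13:11] the PCI function, and bits [23:16] the
	 * instance ID of the target device.
	 */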
	ficaa = 1;
	ficaa |= reg & 0x3FC;
	ficaa |= (func & 0x7) << 11;
	ficaa |= instance_id << 16;

	mutex_lock(&smn_mutex);

	err = pci_write_config_dword(F4, 0x5C, ficaa);
	if (err) {
		pr_warn("Error writing DF Indirect FICAA, FICAA=0x%x\n", ficaa);
		goto out_unlock;
	}

	err = pci_read_config_dword(F4, 0x98, lo);
	if (err)
		pr_warn("Error reading DF Indirect FICAD LO, FICAA=0x%x.\n", ficaa);

out_unlock:
	mutex_unlock(&smn_mutex);

out:
	return err;
}
EXPORT_SYMBOL_GPL(amd_df_indirect_read);

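/*
 * Enumerate the root, misc (function 3) and link (function 4) PCI
 * devices of every northbridge/DF node, cache them in
 * amd_northbridges, and set the feature flags (GART, L3 index
 * disable, L3 partitioning) that apply to this CPU family.
 */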
int amd_cache_northbridges(void)
{
	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
	const struct pci_device_id *link_ids = amd_nb_link_ids;
	const struct pci_device_id *root_ids = amd_root_ids;
	struct pci_dev *root, *misc, *link;
	struct amd_northbridge *nb;
	u16 roots_per_misc = 0;
	u16 misc_count = 0;
	u16 root_count = 0;
	u16 i, j;

	if (amd_northbridges.num)
		return 0;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
		root_ids = hygon_root_ids;
		misc_ids = hygon_nb_misc_ids;
		link_ids = hygon_nb_link_ids;
	}

	misc = NULL;
	while ((misc = next_northbridge(misc, misc_ids)) != NULL)
		misc_count++;

	if (!misc_count)
		return -ENODEV;

	root = NULL;
	while ((root = next_northbridge(root, root_ids)) != NULL)
		root_count++;

	if (root_count) {
		roots_per_misc = root_count / misc_count;

		/*
		 * There should be _exactly_ N roots for each DF/SMN
		 * interface.
		 */
		if (!roots_per_misc || (root_count % roots_per_misc)) {
			pr_info("Unsupported AMD DF/PCI configuration found\n");
			return -ENODEV;
		}
	}

	nb = kcalloc(misc_count, sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = misc_count;

	link = misc = root = NULL;
	for (i = 0; i < amd_northbridges.num; i++) {
		node_to_amd_nb(i)->root = root =
			next_northbridge(root, root_ids);
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, link_ids);

		/*
		 * If there are more PCI root devices than data fabric/
		 * system management network interfaces, then the (N)
		 * PCI roots per DF/SMN interface are functionally the
		 * same (for DF/SMN access) and N-1 are redundant. N-1
		 * PCI roots should be skipped per DF/SMN interface so
		 * the following DF/SMN interfaces get mapped to
		 * correct PCI roots.
		 */
		for (j = 1; j < roots_per_misc; j++)
			root = next_northbridge(root, root_ids);
	}

	if (amd_gart_present())
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Check for L3 cache presence.
	 */
	if (!cpuid_edx(0x80000006))
		return 0;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_stepping >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);

/*
 * Ignores subdevice/subvendor but as far as I can figure out
 * they're useless anyways
 */
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return false;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
		misc_ids = hygon_nb_misc_ids;

	device >>= 16;
	for (id = misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}

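/*
 * Report the MMCONFIG aperture configured in MSR_FAM10H_MMIO_CONF_BASE,
 * or NULL if MMCONFIG is unsupported or disabled. The aperture covers
 * 1 MB of config space per bus in the configured bus range.
 */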
struct resource *amd_get_mmconfig_range(struct resource *res)
{
	u32 address;
	u64 base, msr;
	unsigned int segn_busn_bits;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return NULL;

	/* assume all cpus from fam10h have mmconfig */
	if (boot_cpu_data.x86 < 0x10)
		return NULL;

	address = MSR_FAM10H_MMIO_CONF_BASE;
	rdmsrl(address, msr);

	/* mmconfig is not enabled */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

	res->flags = IORESOURCE_MEM;
	res->start = base;
	res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
	return res;
}

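/*
 * Return the 4-bit mask of L3 subcaches assigned to @cpu's compute
 * unit, read from the L3 partitioning register (0x1d4) of the node's
 * link (function 4) device.
 */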
int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
	unsigned int mask;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

	return (mask >> (4 * cpu_data(cpu).cpu_core_id)) & 0xf;
}

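/*
 * Assign the L3 subcaches in @mask to @cpu's compute unit. BAN mode
 * (bits 19-20 of register 0x1b8) is switched off while any subcache
 * is disabled and restored once the partitioning register returns to
 * its reset value.
 */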
int amd_set_subcaches(int cpu, unsigned long mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	cuid = cpu_data(cpu).cpu_core_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}

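/*
 * Cache each northbridge's GART TLB flush word (misc device register
 * 0x9c); amd_flush_garts() writes these values back with bit 0 set to
 * trigger a flush.
 */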
static void amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	flush_words = kmalloc_array(amd_northbridges.num, sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		pr_notice("Cannot initialize GART flush words, GART support disabled\n");
		return;
	}

	for (i = 0; i != amd_northbridges.num; i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &flush_words[i]);
}

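/* Flush the GART TLB on every northbridge and wait for completion. */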
void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/*
	 * Avoid races between AGP and IOMMU. In theory it's not needed
	 * but I'm not sure if the hardware won't lose flush requests
	 * when another is pending. This whole thing is so expensive anyways
	 * that it doesn't matter to serialize more. -AK
	 */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_northbridges.num; i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_northbridges.num; i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

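/* Set the IC_CFG MSR bits that work around erratum 688 on this CPU. */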
static void __fix_erratum_688(void *info)
{
#define MSR_AMD64_IC_CFG 0xC0011021

	msr_set_bit(MSR_AMD64_IC_CFG, 3);
	msr_set_bit(MSR_AMD64_IC_CFG, 14);
}

/* Apply erratum 688 fix so machines without a BIOS fix work. */
static __init void fix_erratum_688(void)
{
	struct pci_dev *F4;
	u32 val;

	if (boot_cpu_data.x86 != 0x14)
		return;

	if (!amd_northbridges.num)
		return;

	F4 = node_to_amd_nb(0)->link;
	if (!F4)
		return;

	if (pci_read_config_dword(F4, 0x164, &val))
		return;

	if (val & BIT(2))
		return;

	on_each_cpu(__fix_erratum_688, NULL, 0);

	pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n");
}

static __init int init_amd_nbs(void)
{
	amd_cache_northbridges();
	amd_cache_gart();

	fix_erratum_688();

	return 0;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);