Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2007 - 2009 Intel Corporation.
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/dca.h>

/* either a kernel change is needed, or we need something like this in kernel */
#ifndef CONFIG_SMP
#include <asm/smp.h>
#undef cpu_physical_id
#define cpu_physical_id(cpu) (cpuid_ebx(1) >> 24)
#endif

#include "dma.h"
#include "registers.h"

/*
 * Bit 7 of a tag map entry is the "valid" bit, if it is set then bits 0:6
 * contain the bit number of the APIC ID to map into the DCA tag.  If the valid
 * bit is not set, then the value must be 0 or 1 and defines the bit in the tag.
 */
#define DCA_TAG_MAP_VALID 0x80

#define DCA3_TAG_MAP_BIT_TO_INV 0x80
#define DCA3_TAG_MAP_BIT_TO_SEL 0x40
#define DCA3_TAG_MAP_LITERAL_VAL 0x1

#define DCA_TAG_MAP_MASK 0xDF

/* expected tag map bytes for I/OAT ver.2 */
#define DCA2_TAG_MAP_BYTE0 0x80
#define DCA2_TAG_MAP_BYTE1 0x0
#define DCA2_TAG_MAP_BYTE2 0x81
#define DCA2_TAG_MAP_BYTE3 0x82
#define DCA2_TAG_MAP_BYTE4 0x82

/*
 * "Legacy" DCA systems do not implement the DCA register set in the
 * I/OAT device.  Software needs direct support for their tag mappings.
 */

#define APICID_BIT(x)		(DCA_TAG_MAP_VALID | (x))
#define IOAT_TAG_MAP_LEN	8

/* pack PCI B/D/F into a u16 */
static inline u16 dcaid_from_pcidev(struct pci_dev *pci)
{
	return (pci->bus->number << 8) | pci->devfn;
}
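
/*
 * Worked example for dcaid_from_pcidev() (illustrative values only): a
 * requester at bus 0x05, device 0x02, function 1 has
 * devfn = (0x02 << 3) | 0x1 = 0x11, so the packed requester id is
 * (0x05 << 8) | 0x11 = 0x0511.
 */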

static int dca_enabled_in_bios(struct pci_dev *pdev)
{
	/* CPUID level 9 returns DCA configuration */
	/* Bit 0 indicates DCA enabled by the BIOS */
	unsigned long cpuid_level_9;
	int res;

	cpuid_level_9 = cpuid_eax(9);
	res = test_bit(0, &cpuid_level_9);
	if (!res)
		dev_dbg(&pdev->dev, "DCA is disabled in BIOS\n");

	return res;
}

int system_has_dca_enabled(struct pci_dev *pdev)
{
	if (boot_cpu_has(X86_FEATURE_DCA))
		return dca_enabled_in_bios(pdev);

	dev_dbg(&pdev->dev, "boot cpu doesn't have X86_FEATURE_DCA\n");
	return 0;
}

struct ioat_dca_slot {
	struct pci_dev *pdev;	/* requester device */
	u16 rid;		/* requester id, as used by IOAT */
};

#define IOAT_DCA_MAX_REQ 6
#define IOAT3_DCA_MAX_REQ 2

struct ioat_dca_priv {
	void __iomem		*iobase;
	void __iomem		*dca_base;
	int			 max_requesters;
	int			 requester_count;
	u8			 tag_map[IOAT_TAG_MAP_LEN];
	struct ioat_dca_slot	 req_slots[];
};

static int ioat_dca_dev_managed(struct dca_provider *dca,
				struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;

	pdev = to_pci_dev(dev);
	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == pdev)
			return 1;
	}
	return 0;
}

static int ioat_dca_add_requester(struct dca_provider *dca, struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;
	u16 id;
	u16 global_req_table;

	/* This implementation only supports PCI-Express */
	if (!dev_is_pci(dev))
		return -ENODEV;
	pdev = to_pci_dev(dev);
	id = dcaid_from_pcidev(pdev);

	if (ioatdca->requester_count == ioatdca->max_requesters)
		return -ENODEV;

	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == NULL) {
			/* found an empty slot */
			ioatdca->requester_count++;
			ioatdca->req_slots[i].pdev = pdev;
			ioatdca->req_slots[i].rid = id;
			global_req_table =
			      readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET);
			writel(id | IOAT_DCA_GREQID_VALID,
			       ioatdca->iobase + global_req_table + (i * 4));
			return i;
		}
	}
	/* Error, ioatdma->requester_count is out of whack */
	return -EFAULT;
}

static int ioat_dca_remove_requester(struct dca_provider *dca,
				      struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;
	u16 global_req_table;

	/* This implementation only supports PCI-Express */
	if (!dev_is_pci(dev))
		return -ENODEV;
	pdev = to_pci_dev(dev);

	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == pdev) {
			global_req_table =
			      readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET);
			writel(0, ioatdca->iobase + global_req_table + (i * 4));
			ioatdca->req_slots[i].pdev = NULL;
			ioatdca->req_slots[i].rid = 0;
			ioatdca->requester_count--;
			return i;
		}
	}
	return -ENODEV;
}

static u8 ioat_dca_get_tag(struct dca_provider *dca,
			    struct device *dev,
			    int cpu)
{
	u8 tag;

	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	int i, apic_id, bit, value;
	u8 entry;

	tag = 0;
	apic_id = cpu_physical_id(cpu);

	for (i = 0; i < IOAT_TAG_MAP_LEN; i++) {
		entry = ioatdca->tag_map[i];
		if (entry & DCA3_TAG_MAP_BIT_TO_SEL) {
			bit = entry &
				~(DCA3_TAG_MAP_BIT_TO_SEL | DCA3_TAG_MAP_BIT_TO_INV);
			value = (apic_id & (1 << bit)) ? 1 : 0;
		} else if (entry & DCA3_TAG_MAP_BIT_TO_INV) {
			bit = entry & ~DCA3_TAG_MAP_BIT_TO_INV;
			value = (apic_id & (1 << bit)) ? 0 : 1;
		} else {
			value = (entry & DCA3_TAG_MAP_LITERAL_VAL) ? 1 : 0;
		}
		tag |= (value << i);
	}

	return tag;
}
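
/*
 * Worked example for ioat_dca_get_tag() (illustrative values only): with
 * apic_id = 0x06 and tag_map[0] = 0x41 (DCA3_TAG_MAP_BIT_TO_SEL | 1),
 * APIC ID bit 1 is set, so bit 0 of the tag becomes 1; an entry of 0x82
 * (DCA3_TAG_MAP_BIT_TO_INV | 2) would instead contribute the inverse of
 * APIC ID bit 2, and a bare 0x1 contributes a literal 1.
 */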

static const struct dca_ops ioat_dca_ops = {
	.add_requester		= ioat_dca_add_requester,
	.remove_requester	= ioat_dca_remove_requester,
	.get_tag		= ioat_dca_get_tag,
	.dev_managed		= ioat_dca_dev_managed,
};

static int ioat_dca_count_dca_slots(void *iobase, u16 dca_offset)
{
	int slots = 0;
	u32 req;
	u16 global_req_table;

	global_req_table = readw(iobase + dca_offset + IOAT3_DCA_GREQID_OFFSET);
	if (global_req_table == 0)
		return 0;

	do {
		req = readl(iobase + global_req_table + (slots * sizeof(u32)));
		slots++;
	} while ((req & IOAT_DCA_GREQID_LASTID) == 0);

	return slots;
}
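
/*
 * Note: ioat_dca_count_dca_slots() walks the global requester ID table one
 * 32-bit entry at a time and stops at the entry whose IOAT_DCA_GREQID_LASTID
 * flag is set; that terminating entry is included in the returned count.
 */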

static inline int dca3_tag_map_invalid(u8 *tag_map)
{
	/*
	 * If the tag map is not programmed by the BIOS the default is:
	 * 0x80 0x80 0x80 0x80 0x80 0x00 0x00 0x00
	 *
	 * This is an invalid map and will result in only 2 possible tags
	 * 0x1F and 0x00.  0x00 is an invalid DCA tag so we know that
	 * this entire definition is invalid.
	 */
	return ((tag_map[0] == DCA_TAG_MAP_VALID) &&
		(tag_map[1] == DCA_TAG_MAP_VALID) &&
		(tag_map[2] == DCA_TAG_MAP_VALID) &&
		(tag_map[3] == DCA_TAG_MAP_VALID) &&
		(tag_map[4] == DCA_TAG_MAP_VALID));
}

struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
{
	struct dca_provider *dca;
	struct ioat_dca_priv *ioatdca;
	int slots;
	int i;
	int err;
	u16 dca_offset;
	u16 csi_fsb_control;
	u16 pcie_control;
	u8 bit;

	union {
		u64 full;
		struct {
			u32 low;
			u32 high;
		};
	} tag_map;

	if (!system_has_dca_enabled(pdev))
		return NULL;

	dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET);
	if (dca_offset == 0)
		return NULL;

	slots = ioat_dca_count_dca_slots(iobase, dca_offset);
	if (slots == 0)
		return NULL;

	dca = alloc_dca_provider(&ioat_dca_ops,
				 struct_size(ioatdca, req_slots, slots));
	if (!dca)
		return NULL;

	ioatdca = dca_priv(dca);
	ioatdca->iobase = iobase;
	ioatdca->dca_base = iobase + dca_offset;
	ioatdca->max_requesters = slots;

	/* some bios might not know to turn these on */
	csi_fsb_control = readw(ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET);
	if ((csi_fsb_control & IOAT3_CSI_CONTROL_PREFETCH) == 0) {
		csi_fsb_control |= IOAT3_CSI_CONTROL_PREFETCH;
		writew(csi_fsb_control,
		       ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET);
	}
	pcie_control = readw(ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET);
	if ((pcie_control & IOAT3_PCI_CONTROL_MEMWR) == 0) {
		pcie_control |= IOAT3_PCI_CONTROL_MEMWR;
		writew(pcie_control,
		       ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET);
	}


	/* TODO version, compatibility and configuration checks */

	/* copy out the APIC to DCA tag map */
	tag_map.low =
		readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_LOW);
	tag_map.high =
		readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_HIGH);
	for (i = 0; i < 8; i++) {
		bit = tag_map.full >> (8 * i);
		ioatdca->tag_map[i] = bit & DCA_TAG_MAP_MASK;
	}

	if (dca3_tag_map_invalid(ioatdca->tag_map)) {
		add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
		pr_warn_once("%s %s: APICID_TAG_MAP set incorrectly by BIOS, disabling DCA\n",
			     dev_driver_string(&pdev->dev),
			     dev_name(&pdev->dev));
		free_dca_provider(dca);
		return NULL;
	}

	err = register_dca_provider(dca, &pdev->dev);
	if (err) {
		free_dca_provider(dca);
		return NULL;
	}

	return dca;
}
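
For context, the sketch below shows one way a probe/remove path could consume ioat_dca_init() and tear the provider down again. It is a minimal illustration only: the struct ioatdma_device name and its dca/reg_base members, plus the example_* function names, are assumptions for this sketch and are not taken from this file.

/* Caller sketch (assumed names; see note above). */
static void example_setup_dca(struct ioatdma_device *ioat_dma,
			      struct pci_dev *pdev)
{
	/* ioat_dca_init() returns NULL when DCA cannot be used (CPU feature
	 * missing, disabled in BIOS, no DCA capability in the device, or a
	 * broken BIOS tag map); the device keeps working without DCA. */
	ioat_dma->dca = ioat_dca_init(pdev, ioat_dma->reg_base);
}

static void example_teardown_dca(struct ioatdma_device *ioat_dma,
				 struct pci_dev *pdev)
{
	if (ioat_dma->dca) {
		unregister_dca_provider(ioat_dma->dca, &pdev->dev);
		free_dca_provider(ioat_dma->dca);
		ioat_dma->dca = NULL;
	}
}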