Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0
/*
 * MSI hooks for standard x86 apic
 */

#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/dmar.h>
#include <asm/smp.h>
#include <asm/msidef.h>

static struct irq_chip	ia64_msi_chip;

#ifdef CONFIG_SMP
static int ia64_set_msi_irq_affinity(struct irq_data *idata,
				     const cpumask_t *cpu_mask, bool force)
{
	struct msi_msg msg;
	u32 addr, data;
	int cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
	unsigned int irq = idata->irq;

	if (irq_prepare_move(irq, cpu))
		return -1;

	__get_cached_msi_msg(irq_data_get_msi_desc(idata), &msg);

	addr = msg.address_lo;
	addr &= MSI_ADDR_DEST_ID_MASK;
	addr |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));
	msg.address_lo = addr;

	data = msg.data;
	data &= MSI_DATA_VECTOR_MASK;
	data |= MSI_DATA_VECTOR(irq_to_vector(irq));
	msg.data = data;

	pci_write_msi_msg(irq, &msg);
	cpumask_copy(irq_data_get_affinity_mask(idata), cpumask_of(cpu));

	return 0;
}
#endif /* CONFIG_SMP */
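
The .irq_set_affinity hook above rewrites the cached MSI message so that its destination ID field points at the newly chosen CPU, then writes the message back to the device. The generic IRQ core invokes it whenever the affinity of the interrupt changes, for example from "echo <mask> > /proc/irq/<n>/smp_affinity" or from irq_set_affinity() in kernel code. A minimal kernel-side sketch, assuming a hypothetical driver that owns the interrupt (example_pin_irq_to_cpu2 is a made-up name):

#include <linux/interrupt.h>
#include <linux/cpumask.h>

/*
 * Hypothetical helper, illustration only: pin an interrupt this driver
 * owns to CPU 2.  The IRQ core forwards the request to the irq_chip's
 * .irq_set_affinity, i.e. ia64_set_msi_irq_affinity() above for an MSI
 * interrupt on this platform.
 */
static void example_pin_irq_to_cpu2(unsigned int irq)
{
	int err = irq_set_affinity(irq, cpumask_of(2));

	if (err)
		pr_warn("failed to move IRQ %u: %d\n", irq, err);
}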

int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
{
	struct msi_msg	msg;
	unsigned long	dest_phys_id;
	int	irq, vector;

	irq = create_irq();
	if (irq < 0)
		return irq;

	irq_set_msi_desc(irq, desc);
	dest_phys_id = cpu_physical_id(cpumask_any_and(&(irq_to_domain(irq)),
						       cpu_online_mask));
	vector = irq_to_vector(irq);

	msg.address_hi = 0;
	msg.address_lo =
		MSI_ADDR_HEADER |
		MSI_ADDR_DEST_MODE_PHYS |
		MSI_ADDR_REDIRECTION_CPU |
		MSI_ADDR_DEST_ID_CPU(dest_phys_id);

	msg.data =
		MSI_DATA_TRIGGER_EDGE |
		MSI_DATA_LEVEL_ASSERT |
		MSI_DATA_DELIVERY_FIXED |
		MSI_DATA_VECTOR(vector);

	pci_write_msi_msg(irq, &msg);
	irq_set_chip_and_handler(irq, &ia64_msi_chip, handle_edge_irq);

	return 0;
}

void arch_teardown_msi_irq(unsigned int irq)
{
	destroy_irq(irq);
}
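
arch_setup_msi_irq() and arch_teardown_msi_irq() are the per-descriptor fallback hooks used by the generic PCI/MSI layer on architectures without an MSI irqdomain. The sketch below illustrates the calling convention they are written against (a negative return is a hard error, a positive return means the request could not be satisfied); it is a simplified illustration, not the verbatim generic code in drivers/pci/msi.c, and example_setup_msi_irqs is a made-up name:

/*
 * Simplified sketch of a per-descriptor setup loop (illustration only):
 * walk every MSI descriptor of the device and hand it to the arch hook.
 */
static int example_setup_msi_irqs(struct pci_dev *dev)
{
	struct msi_desc *entry;
	int ret;

	for_each_pci_msi_entry(entry, dev) {
		ret = arch_setup_msi_irq(dev, entry);
		if (ret < 0)
			return ret;	/* hard failure */
		if (ret > 0)
			return -ENOSPC;	/* request could not be satisfied */
	}

	return 0;
}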

static void ia64_ack_msi_irq(struct irq_data *data)
{
	irq_complete_move(data->irq);
	irq_move_irq(data);
	ia64_eoi();
}

static int ia64_msi_retrigger_irq(struct irq_data *data)
{
	unsigned int vector = irq_to_vector(data->irq);
	ia64_resend_irq(vector);

	return 1;
}

/*
 * Generic ops used on most IA64 platforms.
 */
static struct irq_chip ia64_msi_chip = {
	.name			= "PCI-MSI",
	.irq_mask		= pci_msi_mask_irq,
	.irq_unmask		= pci_msi_unmask_irq,
	.irq_ack		= ia64_ack_msi_irq,
#ifdef CONFIG_SMP
	.irq_set_affinity	= ia64_set_msi_irq_affinity,
#endif
	.irq_retrigger		= ia64_msi_retrigger_irq,
};
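
Drivers never touch ia64_msi_chip directly; arch_setup_msi_irq() installs it together with handle_edge_irq(), and drivers reach it only through the ordinary, architecture-independent PCI MSI API. A hedged driver-side sketch follows, with made-up names (example_isr, example_enable_msi) and assuming <linux/interrupt.h> is also included:

/*
 * Hypothetical driver-side usage (illustration only).  Everything here is
 * generic PCI/MSI API; on this platform the allocated vector is handled
 * through ia64_msi_chip above.
 */
static irqreturn_t example_isr(int irq, void *dev_id)
{
	/* acknowledge the device here */
	return IRQ_HANDLED;
}

static int example_enable_msi(struct pci_dev *pdev)
{
	int nvec;

	/* ask for exactly one MSI vector */
	nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
	if (nvec < 0)
		return nvec;

	/* request the Linux IRQ number backing vector 0 */
	return request_irq(pci_irq_vector(pdev, 0), example_isr, 0,
			   "example", pdev);
}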

#ifdef CONFIG_INTEL_IOMMU
#ifdef CONFIG_SMP
static int dmar_msi_set_affinity(struct irq_data *data,
				 const struct cpumask *mask, bool force)
{
	unsigned int irq = data->irq;
	struct irq_cfg *cfg = irq_cfg + irq;
	struct msi_msg msg;
	int cpu = cpumask_first_and(mask, cpu_online_mask);

	if (irq_prepare_move(irq, cpu))
		return -1;

	dmar_msi_read(irq, &msg);

	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));

	dmar_msi_write(irq, &msg);
	cpumask_copy(irq_data_get_affinity_mask(data), mask);

	return 0;
}
#endif /* CONFIG_SMP */

static struct irq_chip dmar_msi_type = {
	.name = "DMAR_MSI",
	.irq_unmask = dmar_msi_unmask,
	.irq_mask = dmar_msi_mask,
	.irq_ack = ia64_ack_msi_irq,
#ifdef CONFIG_SMP
	.irq_set_affinity = dmar_msi_set_affinity,
#endif
	.irq_retrigger = ia64_msi_retrigger_irq,
};

static void
msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
{
	struct irq_cfg *cfg = irq_cfg + irq;
	unsigned dest;

	dest = cpu_physical_id(cpumask_first_and(&(irq_to_domain(irq)),
						 cpu_online_mask));

	msg->address_hi = 0;
	msg->address_lo =
		MSI_ADDR_HEADER |
		MSI_ADDR_DEST_MODE_PHYS |
		MSI_ADDR_REDIRECTION_CPU |
		MSI_ADDR_DEST_ID_CPU(dest);

	msg->data =
		MSI_DATA_TRIGGER_EDGE |
		MSI_DATA_LEVEL_ASSERT |
		MSI_DATA_DELIVERY_FIXED |
		MSI_DATA_VECTOR(cfg->vector);
}

int dmar_alloc_hwirq(int id, int node, void *arg)
{
	int irq;
	struct msi_msg msg;

	irq = create_irq();
	if (irq > 0) {
		irq_set_handler_data(irq, arg);
		irq_set_chip_and_handler_name(irq, &dmar_msi_type,
					      handle_edge_irq, "edge");
		msi_compose_msg(NULL, irq, &msg);
		dmar_msi_write(irq, &msg);
	}

	return irq;
}

void dmar_free_hwirq(int irq)
{
	irq_set_handler_data(irq, NULL);
	destroy_irq(irq);
}
#endif /* CONFIG_INTEL_IOMMU */
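
dmar_alloc_hwirq() and dmar_free_hwirq() are consumed by the Intel IOMMU (DMAR) code when it wires up a remapping unit's fault-reporting interrupt; the real caller lives in drivers/iommu/intel/dmar.c. The sketch below only illustrates that pattern with made-up names (example_dmar_fault, example_wire_dmar_irq) and assumes <linux/interrupt.h> is included:

/*
 * Illustration only: allocate a DMAR MSI with the helper above and attach
 * a fault handler to it.  Not the actual drivers/iommu code.
 */
static irqreturn_t example_dmar_fault(int irq, void *arg)
{
	/* read and clear the unit's fault status registers here */
	return IRQ_HANDLED;
}

static int example_wire_dmar_irq(int seq_id, int node, void *unit)
{
	int irq = dmar_alloc_hwirq(seq_id, node, unit);

	if (irq <= 0)
		return -EINVAL;

	return request_irq(irq, example_dmar_fault, IRQF_NO_THREAD,
			   "example-dmar", unit);
}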