// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2006, Segher Boessenkool, IBM Corporation.
 * Copyright 2006-2007, Michael Ellerman, IBM Corporation.
 */

#include <linux/irq.h>
#include <linux/msi.h>
#include <asm/mpic.h>
#include <asm/prom.h>
#include <asm/hw_irq.h>
#include <asm/ppc-pci.h>
#include <asm/msi_bitmap.h>

#include "mpic.h"

/* A bit ugly, can we get this from the pci_dev somehow? */
static struct mpic *msi_mpic;

static void mpic_u3msi_mask_irq(struct irq_data *data)
{
	pci_msi_mask_irq(data);
	mpic_mask_irq(data);
}

static void mpic_u3msi_unmask_irq(struct irq_data *data)
{
	mpic_unmask_irq(data);
	pci_msi_unmask_irq(data);
}

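/*
 * irq_chip for U3/U4 MSIs: PCI MSI mask/unmask is layered on top of the
 * regular MPIC source operations, so both the device and the interrupt
 * controller see a consistent masked state.
 */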
static struct irq_chip mpic_u3msi_chip = {
	.irq_shutdown = mpic_u3msi_mask_irq,
	.irq_mask = mpic_u3msi_mask_irq,
	.irq_unmask = mpic_u3msi_unmask_irq,
	.irq_eoi = mpic_end_irq,
	.irq_set_type = mpic_set_irq_type,
	.irq_set_affinity = mpic_set_affinity,
	.name = "MPIC-U3MSI",
};

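/*
 * Read the MSI "magic" address out of a bridge's HyperTransport MSI
 * mapping capability: either the architected fixed address, or the
 * 64-bit address programmed into the capability's ADDR_LO/ADDR_HI
 * registers.
 */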
static u64 read_ht_magic_addr(struct pci_dev *pdev, unsigned int pos)
{
	u8 flags;
	u32 tmp;
	u64 addr;

	pci_read_config_byte(pdev, pos + HT_MSI_FLAGS, &flags);

	if (flags & HT_MSI_FLAGS_FIXED)
		return HT_MSI_FIXED_ADDR;

	pci_read_config_dword(pdev, pos + HT_MSI_ADDR_LO, &tmp);
	addr = tmp & HT_MSI_ADDR_LO_MASK;
	pci_read_config_dword(pdev, pos + HT_MSI_ADDR_HI, &tmp);
	addr = addr | ((u64)tmp << 32);

	return addr;
}

static u64 find_ht_magic_addr(struct pci_dev *pdev, unsigned int hwirq)
{
	struct pci_bus *bus;
	unsigned int pos;

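	/*
	 * Walk up from the device towards the root, looking for the first
	 * bridge that carries an HT MSI mapping capability.
	 */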
	for (bus = pdev->bus; bus && bus->self; bus = bus->parent) {
		pos = pci_find_ht_capability(bus->self, HT_CAPTYPE_MSI_MAPPING);
		if (pos)
			return read_ht_magic_addr(bus->self, pos);
	}

	return 0;
}

static u64 find_u4_magic_addr(struct pci_dev *pdev, unsigned int hwirq)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);

	/* U4 PCIe MSIs need to write to the special register in
	 * the bridge that generates interrupts. There should
	 * theoretically be a register at 0xf8005000 where you just write
	 * the MSI number and that triggers the right interrupt, but
	 * unfortunately, this is busted in HW: the bridge endian-swaps
	 * the value and hits the wrong nibble in the register.
	 *
	 * So instead we use another register set, which is normally used
	 * for converting HT interrupts to MPIC interrupts and decodes
	 * the interrupt number as part of the low address bits.
	 *
	 * This will not work if we ever use more than one legacy MSI in
	 * a block, but we never do. For one MSI or multiple MSI-X, where
	 * each interrupt address can be specified separately, it works
	 * just fine.
	 */
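	/*
	 * Example of the resulting encoding (derived from the expression
	 * below): hwirq 0x5 on a U4 PCIe host yields the doorbell address
	 * 0xf8004000 | (0x5 << 4) = 0xf8004050.
	 */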
	if (of_device_is_compatible(hose->dn, "u4-pcie") ||
	    of_device_is_compatible(hose->dn, "U4-pcie"))
		return 0xf8004000 | (hwirq << 4);

	return 0;
}

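/*
 * Undo u3msi_setup_msi_irqs(): detach each MSI descriptor from its virq,
 * dispose of the virq mapping and return the hardware IRQ number to the
 * MPIC's MSI bitmap.
 */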
static void u3msi_teardown_msi_irqs(struct pci_dev *pdev)
{
	struct msi_desc *entry;
	irq_hw_number_t hwirq;

	for_each_pci_msi_entry(entry, pdev) {
		if (!entry->irq)
			continue;

		hwirq = virq_to_hw(entry->irq);
		irq_set_msi_desc(entry->irq, NULL);
		irq_dispose_mapping(entry->irq);
		msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, 1);
	}
}

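/*
 * Allocate one hardware IRQ from the MPIC MSI bitmap for every MSI entry
 * of the device, map it to a virtual IRQ, attach the MPIC-U3MSI chip and
 * point the device's MSI address/data at the bridge's "magic" doorbell
 * address.
 */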
static int u3msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	unsigned int virq;
	struct msi_desc *entry;
	struct msi_msg msg;
	u64 addr;
	int hwirq;

	if (type == PCI_CAP_ID_MSIX)
		pr_debug("u3msi: MSI-X untested, trying anyway.\n");

	/* If we can't find a magic address then MSI ain't gonna work */
	if (find_ht_magic_addr(pdev, 0) == 0 &&
	    find_u4_magic_addr(pdev, 0) == 0) {
		pr_debug("u3msi: no magic address found for %s\n",
			 pci_name(pdev));
		return -ENXIO;
	}

	for_each_pci_msi_entry(entry, pdev) {
		hwirq = msi_bitmap_alloc_hwirqs(&msi_mpic->msi_bitmap, 1);
		if (hwirq < 0) {
			pr_debug("u3msi: failed allocating hwirq\n");
			return hwirq;
		}

		addr = find_ht_magic_addr(pdev, hwirq);
		if (addr == 0)
			addr = find_u4_magic_addr(pdev, hwirq);
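		/*
		 * Program the 64-bit magic address into the device's MSI
		 * address registers, split into the low and high 32-bit
		 * halves of the MSI message.
		 */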
		msg.address_lo = addr & 0xFFFFFFFF;
		msg.address_hi = addr >> 32;

		virq = irq_create_mapping(msi_mpic->irqhost, hwirq);
		if (!virq) {
			pr_debug("u3msi: failed mapping hwirq 0x%x\n", hwirq);
			msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, 1);
			return -ENOSPC;
		}

		irq_set_msi_desc(virq, entry);
		irq_set_chip(virq, &mpic_u3msi_chip);
		irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING);

		pr_debug("u3msi: allocated virq 0x%x (hw 0x%x) addr 0x%lx\n",
			 virq, hwirq, (unsigned long)addr);

		msg.data = hwirq;
		pci_write_msi_msg(virq, &msg);

		hwirq++;
	}

	return 0;
}

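/*
 * Wire the U3/U4 MSI support into the platform: set up the MPIC's MSI
 * bitmap allocator, remember the MPIC for the setup/teardown paths above
 * and install the MSI controller ops on every PCI host bridge.
 */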
int mpic_u3msi_init(struct mpic *mpic)
{
	int rc;
	struct pci_controller *phb;

	rc = mpic_msi_init_allocator(mpic);
	if (rc) {
		pr_debug("u3msi: Error allocating bitmap!\n");
		return rc;
	}

	pr_debug("u3msi: Registering MPIC U3 MSI callbacks.\n");

	BUG_ON(msi_mpic);
	msi_mpic = mpic;

	list_for_each_entry(phb, &hose_list, list_node) {
		WARN_ON(phb->controller_ops.setup_msi_irqs);
		phb->controller_ops.setup_msi_irqs = u3msi_setup_msi_irqs;
		phb->controller_ops.teardown_msi_irqs = u3msi_teardown_msi_irqs;
	}

	return 0;
}