// SPDX-License-Identifier: GPL-2.0
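/*
 * PCIe host controller driver for the Sigma Designs SMP8759 (Tango) SoC.
 *
 * Config space accesses are muxed with MMIO accesses (see the comments in
 * smp8759_config_read/write below), and MSIs are handled through a
 * 256-vector doorbell/status/enable block in the controller.
 */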
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/pci-ecam.h>
#include <linux/delay.h>
#include <linux/msi.h>
#include <linux/of_address.h>

#define MSI_MAX			256

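/*
 * Controller register offsets, as used below: MUX switches between config
 * and MMIO accesses, TEST_OUT exposes the LTSSM state for the link-up
 * check, DOORBELL is the MSI doorbell target, and STATUS/ENABLE are arrays
 * of eight 32-bit registers covering the 256 MSI vectors.
 */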
#define SMP8759_MUX		0x48
#define SMP8759_TEST_OUT	0x74
#define SMP8759_DOORBELL	0x7c
#define SMP8759_STATUS		0x80
#define SMP8759_ENABLE		0xa0

struct tango_pcie {
	DECLARE_BITMAP(used_msi, MSI_MAX);
	u64			msi_doorbell;
	spinlock_t		used_msi_lock;
	void __iomem		*base;
	struct irq_domain	*dom;
};

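/*
 * Chained handler for the MSI summary interrupt: walk the allocated-MSI
 * bitmap in 32-vector groups, read the corresponding STATUS word, and
 * dispatch every pending vector to its mapped Linux IRQ.
 */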
static void tango_msi_isr(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct tango_pcie *pcie = irq_desc_get_handler_data(desc);
	unsigned long status, base, virq, idx, pos = 0;

	chained_irq_enter(chip, desc);
	spin_lock(&pcie->used_msi_lock);

	while ((pos = find_next_bit(pcie->used_msi, MSI_MAX, pos)) < MSI_MAX) {
		base = round_down(pos, 32);
		status = readl_relaxed(pcie->base + SMP8759_STATUS + base / 8);
		for_each_set_bit(idx, &status, 32) {
			virq = irq_find_mapping(pcie->dom, base + idx);
			generic_handle_irq(virq);
		}
		pos = base + 32;
	}

	spin_unlock(&pcie->used_msi_lock);
	chained_irq_exit(chip, desc);
}

static void tango_ack(struct irq_data *d)
{
	struct tango_pcie *pcie = d->chip_data;
	u32 offset = (d->hwirq / 32) * 4;
	u32 bit = BIT(d->hwirq % 32);

	writel_relaxed(bit, pcie->base + SMP8759_STATUS + offset);
}

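/* Set or clear one vector's bit in the relevant 32-bit ENABLE register. */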
static void update_msi_enable(struct irq_data *d, bool unmask)
{
	unsigned long flags;
	struct tango_pcie *pcie = d->chip_data;
	u32 offset = (d->hwirq / 32) * 4;
	u32 bit = BIT(d->hwirq % 32);
	u32 val;

	spin_lock_irqsave(&pcie->used_msi_lock, flags);
	val = readl_relaxed(pcie->base + SMP8759_ENABLE + offset);
	val = unmask ? val | bit : val & ~bit;
	writel_relaxed(val, pcie->base + SMP8759_ENABLE + offset);
	spin_unlock_irqrestore(&pcie->used_msi_lock, flags);
}

static void tango_mask(struct irq_data *d)
{
	update_msi_enable(d, false);
}

static void tango_unmask(struct irq_data *d)
{
	update_msi_enable(d, true);
}

static int tango_set_affinity(struct irq_data *d, const struct cpumask *mask,
			      bool force)
{
	return -EINVAL;
}

static void tango_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct tango_pcie *pcie = d->chip_data;
	msg->address_lo = lower_32_bits(pcie->msi_doorbell);
	msg->address_hi = upper_32_bits(pcie->msi_doorbell);
	msg->data = d->hwirq;
}

static struct irq_chip tango_chip = {
	.irq_ack		= tango_ack,
	.irq_mask		= tango_mask,
	.irq_unmask		= tango_unmask,
	.irq_set_affinity	= tango_set_affinity,
	.irq_compose_msi_msg	= tango_compose_msi_msg,
};

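/*
 * Top-level MSI irq_chip: mask/unmask at both the PCI MSI capability and
 * the controller level (the parent tango_chip), and forward acks to the
 * parent.
 */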
static void msi_ack(struct irq_data *d)
{
	irq_chip_ack_parent(d);
}

static void msi_mask(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}

static void msi_unmask(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}

static struct irq_chip msi_chip = {
	.name		= "MSI",
	.irq_ack	= msi_ack,
	.irq_mask	= msi_mask,
	.irq_unmask	= msi_unmask,
};

static struct msi_domain_info msi_dom_info = {
	.flags	= MSI_FLAG_PCI_MSIX | MSI_FLAG_USE_DEF_DOM_OPS |
		  MSI_FLAG_USE_DEF_CHIP_OPS,
	.chip	= &msi_chip,
};

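/* Allocate the first free MSI vector and map it with edge handling. */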
static int tango_irq_domain_alloc(struct irq_domain *dom, unsigned int virq,
				  unsigned int nr_irqs, void *args)
{
	struct tango_pcie *pcie = dom->host_data;
	unsigned long flags;
	int pos;

	spin_lock_irqsave(&pcie->used_msi_lock, flags);
	pos = find_first_zero_bit(pcie->used_msi, MSI_MAX);
	if (pos >= MSI_MAX) {
		spin_unlock_irqrestore(&pcie->used_msi_lock, flags);
		return -ENOSPC;
	}
	__set_bit(pos, pcie->used_msi);
	spin_unlock_irqrestore(&pcie->used_msi_lock, flags);
	irq_domain_set_info(dom, virq, pos, &tango_chip,
			    pcie, handle_edge_irq, NULL, NULL);

	return 0;
}

static void tango_irq_domain_free(struct irq_domain *dom, unsigned int virq,
				  unsigned int nr_irqs)
{
	unsigned long flags;
	struct irq_data *d = irq_domain_get_irq_data(dom, virq);
	struct tango_pcie *pcie = d->chip_data;

	spin_lock_irqsave(&pcie->used_msi_lock, flags);
	__clear_bit(d->hwirq, pcie->used_msi);
	spin_unlock_irqrestore(&pcie->used_msi_lock, flags);
}

static const struct irq_domain_ops dom_ops = {
	.alloc	= tango_irq_domain_alloc,
	.free	= tango_irq_domain_free,
};

static int smp8759_config_read(struct pci_bus *bus, unsigned int devfn,
			       int where, int size, u32 *val)
{
	struct pci_config_window *cfg = bus->sysdata;
	struct tango_pcie *pcie = dev_get_drvdata(cfg->parent);
	int ret;

	/* Reads in configuration space outside devfn 0 return garbage */
	if (devfn != 0)
		return PCIBIOS_FUNC_NOT_SUPPORTED;

	/*
	 * PCI config and MMIO accesses are muxed. Linux doesn't have a
	 * mutual exclusion mechanism for config vs. MMIO accesses, so
	 * concurrent accesses may cause corruption.
	 */
	writel_relaxed(1, pcie->base + SMP8759_MUX);
	ret = pci_generic_config_read(bus, devfn, where, size, val);
	writel_relaxed(0, pcie->base + SMP8759_MUX);

	return ret;
}

static int smp8759_config_write(struct pci_bus *bus, unsigned int devfn,
				int where, int size, u32 val)
{
	struct pci_config_window *cfg = bus->sysdata;
	struct tango_pcie *pcie = dev_get_drvdata(cfg->parent);
	int ret;

	writel_relaxed(1, pcie->base + SMP8759_MUX);
	ret = pci_generic_config_write(bus, devfn, where, size, val);
	writel_relaxed(0, pcie->base + SMP8759_MUX);

	return ret;
}

static const struct pci_ecam_ops smp8759_ecam_ops = {
	.bus_shift	= 20,
	.pci_ops	= {
		.map_bus	= pci_ecam_map_bus,
		.read		= smp8759_config_read,
		.write		= smp8759_config_write,
	}
};

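/*
 * Writing 16 to TEST_OUT appears to select the LTSSM state for readback;
 * poll for L0 (0xf) for roughly 30-40 ms before giving up.
 */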
static int tango_pcie_link_up(struct tango_pcie *pcie)
{
	void __iomem *test_out = pcie->base + SMP8759_TEST_OUT;
	int i;

	writel_relaxed(16, test_out);
	for (i = 0; i < 10; ++i) {
		u32 ltssm_state = readl_relaxed(test_out) >> 8;
		if ((ltssm_state & 0x1f) == 0xf) /* L0 */
			return 1;
		usleep_range(3000, 4000);
	}

	return 0;
}

static int tango_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct tango_pcie *pcie;
	struct resource *res;
	struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
	struct irq_domain *msi_dom, *irq_dom;
	struct of_pci_range_parser parser;
	struct of_pci_range range;
	int virq, offset;

	dev_warn(dev, "simultaneous PCI config and MMIO accesses may cause data corruption\n");
	add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	pcie->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(pcie->base))
		return PTR_ERR(pcie->base);

	platform_set_drvdata(pdev, pcie);

	if (!tango_pcie_link_up(pcie))
		return -ENODEV;

	if (of_pci_dma_range_parser_init(&parser, dev->of_node) < 0)
		return -ENOENT;

	if (of_pci_range_parser_one(&parser, &range) == NULL)
		return -ENOENT;

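	/*
	 * Derive the MSI doorbell bus address from the end of the first
	 * inbound dma-range plus the register block base and the
	 * SMP8759_DOORBELL offset.
	 */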
	range.pci_addr += range.size;
	pcie->msi_doorbell = range.pci_addr + res->start + SMP8759_DOORBELL;

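	/* Mask all 256 MSI vectors until they are explicitly unmasked. */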
	for (offset = 0; offset < MSI_MAX / 8; offset += 4)
		writel_relaxed(0, pcie->base + SMP8759_ENABLE + offset);

	virq = platform_get_irq(pdev, 1);
	if (virq < 0)
		return virq;

	irq_dom = irq_domain_create_linear(fwnode, MSI_MAX, &dom_ops, pcie);
	if (!irq_dom) {
		dev_err(dev, "Failed to create IRQ domain\n");
		return -ENOMEM;
	}

	msi_dom = pci_msi_create_irq_domain(fwnode, &msi_dom_info, irq_dom);
	if (!msi_dom) {
		dev_err(dev, "Failed to create MSI domain\n");
		irq_domain_remove(irq_dom);
		return -ENOMEM;
	}

	pcie->dom = irq_dom;
	spin_lock_init(&pcie->used_msi_lock);
	irq_set_chained_handler_and_data(virq, tango_msi_isr, pcie);

	return pci_host_common_probe(pdev);
}

static const struct of_device_id tango_pcie_ids[] = {
	{
		.compatible	= "sigma,smp8759-pcie",
		.data		= &smp8759_ecam_ops,
	},
	{ },
};

static struct platform_driver tango_pcie_driver = {
	.probe	= tango_pcie_probe,
	.driver	= {
		.name			= KBUILD_MODNAME,
		.of_match_table		= tango_pcie_ids,
		.suppress_bind_attrs	= true,
	},
};
builtin_platform_driver(tango_pcie_driver);

/*
 * The root complex advertises the wrong device class.
 * Header Type 1 is for PCI-to-PCI bridges.
 */
static void tango_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIGMA, 0x0024, tango_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIGMA, 0x0028, tango_fixup_class);

/*
 * The root complex exposes a "fake" BAR, which is used to filter
 * bus-to-system accesses. Only accesses within the range defined by this
 * BAR are forwarded to the host, others are ignored.
 *
 * By default, the DMA framework expects an identity mapping, and DRAM0 is
 * mapped at 0x80000000.
 */
static void tango_fixup_bar(struct pci_dev *dev)
{
	dev->non_compliant_bars = true;
	pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, 0x80000000);
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIGMA, 0x0024, tango_fixup_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIGMA, 0x0028, tango_fixup_bar);