// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		https://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 */

#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci_regs.h>
#include <linux/platform_device.h>

#include "../../pci.h"
#include "pcie-designware.h"

static struct pci_ops dw_pcie_ops;
static struct pci_ops dw_child_pcie_ops;

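/*
 * Upper (PCI/MSI) half of the hierarchical MSI irqchip: mask/unmask keep
 * the MSI capability mask bits in sync and forward to the parent
 * (controller-level) chip, while acks simply propagate down to the parent.
 */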
static void dw_msi_ack_irq(struct irq_data *d)
{
	irq_chip_ack_parent(d);
}

static void dw_msi_mask_irq(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}

static void dw_msi_unmask_irq(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}

static struct irq_chip dw_pcie_msi_irq_chip = {
	.name = "PCI-MSI",
	.irq_ack = dw_msi_ack_irq,
	.irq_mask = dw_msi_mask_irq,
	.irq_unmask = dw_msi_unmask_irq,
};

static struct msi_domain_info dw_pcie_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
	.chip	= &dw_pcie_msi_irq_chip,
};

/* MSI interrupt handler: dispatch every vector pending in the status registers */
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
	int i, pos, irq;
	unsigned long val;
	u32 status, num_ctrls;
	irqreturn_t ret = IRQ_NONE;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	for (i = 0; i < num_ctrls; i++) {
		status = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
					   (i * MSI_REG_CTRL_BLOCK_SIZE));
		if (!status)
			continue;

		ret = IRQ_HANDLED;
		val = status;
		pos = 0;
		while ((pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL,
					    pos)) != MAX_MSI_IRQS_PER_CTRL) {
			irq = irq_find_mapping(pp->irq_domain,
					       (i * MAX_MSI_IRQS_PER_CTRL) +
					       pos);
			generic_handle_irq(irq);
			pos++;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(dw_handle_msi_irq);

/* Chained MSI interrupt service routine */
static void dw_chained_msi_isr(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct pcie_port *pp;

	chained_irq_enter(chip, desc);

	pp = irq_desc_get_handler_data(desc);
	dw_handle_msi_irq(pp);

	chained_irq_exit(chip, desc);
}

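/*
 * Compose the MSI message for a vector: the doorbell address is the DMA
 * address of pp->msi_msg and the payload carries the hwirq number.
 */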
static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u64 msi_target;

	msi_target = (u64)pp->msi_data;

	msg->address_lo = lower_32_bits(msi_target);
	msg->address_hi = upper_32_bits(msi_target);

	msg->data = d->hwirq;

	dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
		(int)d->hwirq, msg->address_hi, msg->address_lo);
}

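/*
 * All vectors are demultiplexed from a single chained parent interrupt,
 * so per-vector CPU affinity cannot be honoured here.
 */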
static int dw_pci_msi_set_affinity(struct irq_data *d,
				   const struct cpumask *mask, bool force)
{
	return -EINVAL;
}

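/*
 * Bottom (controller) half of the irqchip: mask/unmask/ack act on the
 * per-controller INTR0 registers in the DBI space. The pp->irq_mask[]
 * shadow is kept consistent with the hardware under pp->lock.
 */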
static void dw_pci_bottom_mask(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	pp->irq_mask[ctrl] |= BIT(bit);
	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dw_pci_bottom_unmask(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	pp->irq_mask[ctrl] &= ~BIT(bit);
	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dw_pci_bottom_ack(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, BIT(bit));
}

static struct irq_chip dw_pci_msi_bottom_irq_chip = {
	.name = "DWPCI-MSI",
	.irq_ack = dw_pci_bottom_ack,
	.irq_compose_msi_msg = dw_pci_setup_msi_msg,
	.irq_set_affinity = dw_pci_msi_set_affinity,
	.irq_mask = dw_pci_bottom_mask,
	.irq_unmask = dw_pci_bottom_unmask,
};

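/*
 * Vectors are handed out as naturally aligned power-of-two blocks from
 * the bitmap, as multi-MSI requires; each one is bound to the bottom chip
 * with edge handling.
 */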
static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs,
				    void *args)
{
	struct pcie_port *pp = domain->host_data;
	unsigned long flags;
	u32 i;
	int bit;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors,
				      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);

	if (bit < 0)
		return -ENOSPC;

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, bit + i,
				    pp->msi_irq_chip,
				    pp, handle_edge_irq,
				    NULL, NULL);

	return 0;
}

static void dw_pcie_irq_domain_free(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct pcie_port *pp = domain->host_data;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bitmap_release_region(pp->msi_irq_in_use, d->hwirq,
			      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
	.alloc	= dw_pcie_irq_domain_alloc,
	.free	= dw_pcie_irq_domain_free,
};

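/*
 * Build the two-level domain hierarchy: a linear domain for the native
 * controller vectors with a PCI/MSI domain stacked on top of it.
 */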
int dw_pcie_allocate_domains(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);

	pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
						  &dw_pcie_msi_domain_ops, pp);
	if (!pp->irq_domain) {
		dev_err(pci->dev, "Failed to create IRQ domain\n");
		return -ENOMEM;
	}

	irq_domain_update_bus_token(pp->irq_domain, DOMAIN_BUS_NEXUS);

	pp->msi_domain = pci_msi_create_irq_domain(fwnode,
						   &dw_pcie_msi_domain_info,
						   pp->irq_domain);
	if (!pp->msi_domain) {
		dev_err(pci->dev, "Failed to create MSI domain\n");
		irq_domain_remove(pp->irq_domain);
		return -ENOMEM;
	}

	return 0;
}

void dw_pcie_free_msi(struct pcie_port *pp)
{
	if (pp->msi_irq) {
		irq_set_chained_handler(pp->msi_irq, NULL);
		irq_set_handler_data(pp->msi_irq, NULL);
	}

	irq_domain_remove(pp->msi_domain);
	irq_domain_remove(pp->irq_domain);

	if (pp->msi_data) {
		struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
		struct device *dev = pci->dev;

		dma_unmap_single_attrs(dev, pp->msi_data, sizeof(pp->msi_msg),
				       DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	}
}

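/*
 * Program the doorbell (DMA) address of pp->msi_msg into the controller
 * so it can detect inbound MSI writes and latch them into the INTR0
 * status registers instead of forwarding them as ordinary memory writes.
 */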
void dw_pcie_msi_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u64 msi_target = (u64)pp->msi_data;

	if (!IS_ENABLED(CONFIG_PCI_MSI))
		return;

	/* Program the msi_data */
	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_LO, lower_32_bits(msi_target));
	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(msi_target));
}
EXPORT_SYMBOL_GPL(dw_pcie_msi_init);

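/*
 * Gather host resources from DT/platform data, set up the MSI machinery
 * (unless the glue driver provides its own via msi_host_init), run the
 * glue driver's host_init hook and finally probe the host bridge.
 */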
int dw_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource_entry *win;
	struct pci_host_bridge *bridge;
	struct resource *cfg_res;
	int ret;

	raw_spin_lock_init(&pci->pp.lock);

	cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
	if (cfg_res) {
		pp->cfg0_size = resource_size(cfg_res);
		pp->cfg0_base = cfg_res->start;
	} else if (!pp->va_cfg0_base) {
		dev_err(dev, "Missing *config* reg space\n");
	}

	bridge = devm_pci_alloc_host_bridge(dev, 0);
	if (!bridge)
		return -ENOMEM;

	pp->bridge = bridge;

	/* Get the I/O and memory ranges from DT */
	resource_list_for_each_entry(win, &bridge->windows) {
		switch (resource_type(win->res)) {
		case IORESOURCE_IO:
			pp->io_size = resource_size(win->res);
			pp->io_bus_addr = win->res->start - win->offset;
			pp->io_base = pci_pio_to_address(win->res->start);
			break;
		case 0:
			dev_err(dev, "Missing *config* reg space\n");
			pp->cfg0_size = resource_size(win->res);
			pp->cfg0_base = win->res->start;
			if (!pci->dbi_base) {
				pci->dbi_base = devm_pci_remap_cfgspace(dev,
								pp->cfg0_base,
								pp->cfg0_size);
				if (!pci->dbi_base) {
					dev_err(dev, "Error with ioremap\n");
					return -ENOMEM;
				}
			}
			break;
		}
	}

	if (!pp->va_cfg0_base) {
		pp->va_cfg0_base = devm_pci_remap_cfgspace(dev,
					pp->cfg0_base, pp->cfg0_size);
		if (!pp->va_cfg0_base) {
			dev_err(dev, "Error with ioremap in function\n");
			return -ENOMEM;
		}
	}

	ret = of_property_read_u32(np, "num-viewport", &pci->num_viewport);
	if (ret)
		pci->num_viewport = 2;

	if (pci->link_gen < 1)
		pci->link_gen = of_pci_get_max_link_speed(np);

	if (pci_msi_enabled()) {
		/*
		 * If a specific SoC driver needs to change the
		 * default number of vectors, it needs to implement
		 * the set_num_vectors callback.
		 */
		if (!pp->ops->set_num_vectors) {
			pp->num_vectors = MSI_DEF_NUM_VECTORS;
		} else {
			pp->ops->set_num_vectors(pp);

			if (pp->num_vectors > MAX_MSI_IRQS ||
			    pp->num_vectors == 0) {
				dev_err(dev, "Invalid number of vectors\n");
				return -EINVAL;
			}
		}

		if (!pp->ops->msi_host_init) {
			pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;

			ret = dw_pcie_allocate_domains(pp);
			if (ret)
				return ret;

			if (pp->msi_irq)
				irq_set_chained_handler_and_data(pp->msi_irq,
							    dw_chained_msi_isr,
							    pp);

			pp->msi_data = dma_map_single_attrs(pci->dev, &pp->msi_msg,
						      sizeof(pp->msi_msg),
						      DMA_FROM_DEVICE,
						      DMA_ATTR_SKIP_CPU_SYNC);
			if (dma_mapping_error(pci->dev, pp->msi_data)) {
				dev_err(pci->dev, "Failed to map MSI data\n");
				pp->msi_data = 0;
				/* Don't return a stale 0 from the goto below */
				ret = -ENOMEM;
				goto err_free_msi;
			}
		} else {
			ret = pp->ops->msi_host_init(pp);
			if (ret < 0)
				return ret;
		}
	}

	/* Set default bus ops */
	bridge->ops = &dw_pcie_ops;
	bridge->child_ops = &dw_child_pcie_ops;

	if (pp->ops->host_init) {
		ret = pp->ops->host_init(pp);
		if (ret)
			goto err_free_msi;
	}

	bridge->sysdata = pp;

	ret = pci_host_probe(bridge);
	if (!ret)
		return 0;

err_free_msi:
	if (pci_msi_enabled() && !pp->ops->msi_host_init)
		dw_pcie_free_msi(pp);
	return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_host_init);

void dw_pcie_host_deinit(struct pcie_port *pp)
{
	pci_stop_root_bus(pp->bridge->bus);
	pci_remove_root_bus(pp->bridge->bus);
	if (pci_msi_enabled() && !pp->ops->msi_host_init)
		dw_pcie_free_msi(pp);
}
EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);

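/*
 * Config accesses to devices below the root bus share outbound ATU 0:
 * the window is retargeted at the requested bus/devfn before each access.
 */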
static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
						unsigned int devfn, int where)
{
	int type;
	u32 busdev;
	struct pcie_port *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	/*
	 * Checking whether the link is up here is a last line of defense
	 * against platforms that forward errors on the system bus as
	 * SError upon PCI configuration transactions issued when the link
	 * is down. This check is racy by definition and does not stop
	 * the system from triggering an SError if the link goes down
	 * after this check is performed.
	 */
	if (!dw_pcie_link_up(pci))
		return NULL;

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (pci_is_root_bus(bus->parent))
		type = PCIE_ATU_TYPE_CFG0;
	else
		type = PCIE_ATU_TYPE_CFG1;

	dw_pcie_prog_outbound_atu(pci, 0, type, pp->cfg0_base, busdev,
				  pp->cfg0_size);

	return pp->va_cfg0_base + where;
}

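/*
 * When viewports are scarce, ATU 0 is shared between config and I/O
 * accesses, so the I/O window must be restored after each config cycle.
 */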
static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 *val)
{
	int ret;
	struct pcie_port *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	ret = pci_generic_config_read(bus, devfn, where, size, val);

	if (!ret && pci->io_cfg_atu_shared)
		dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
}

static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 val)
{
	int ret;
	struct pcie_port *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	ret = pci_generic_config_write(bus, devfn, where, size, val);

	if (!ret && pci->io_cfg_atu_shared)
		dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
}

static struct pci_ops dw_child_pcie_ops = {
	.map_bus = dw_pcie_other_conf_map_bus,
	.read = dw_pcie_rd_other_conf,
	.write = dw_pcie_wr_other_conf,
};

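/*
 * Accesses to the root bus itself go straight to the DBI space; only
 * device 0 exists there, so reject any other slot.
 */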
void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where)
{
	struct pcie_port *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	if (PCI_SLOT(devfn) > 0)
		return NULL;

	return pci->dbi_base + where;
}
EXPORT_SYMBOL_GPL(dw_pcie_own_conf_map_bus);

static struct pci_ops dw_pcie_ops = {
	.map_bus = dw_pcie_own_conf_map_bus,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
};

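/*
 * Bring up the Root Complex: unlock the read-only DBI registers, set up
 * the MSI controllers, RC BARs, bus numbers and command register, program
 * the outbound ATU windows and finally re-enable DBI write protection.
 */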
void dw_pcie_setup_rc(struct pcie_port *pp)
{
	u32 val, ctrl, num_ctrls;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	/*
	 * Enable DBI read-only registers for writing/updating configuration.
	 * Write permission gets disabled towards the end of this function.
	 */
	dw_pcie_dbi_ro_wr_en(pci);

	dw_pcie_setup(pci);

	if (pci_msi_enabled() && !pp->ops->msi_host_init) {
		num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

		/* Initialize IRQ Status array */
		for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
			pp->irq_mask[ctrl] = ~0;
			dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
					    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
					    pp->irq_mask[ctrl]);
			dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
					    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
					    ~0);
		}
	}

	/* Setup RC BARs */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);

	/* Setup interrupt pins */
	val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
	val &= 0xffff00ff;
	val |= 0x00000100;
	dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);

	/* Setup bus numbers */
	val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
	val &= 0xff000000;
	val |= 0x00ff0100;
	dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);

	/* Setup command register */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
	       PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	/*
	 * If the platform provides its own child bus config accesses, it means
	 * the platform uses its own address translation component rather than
	 * ATU, so we should not program the ATU here.
	 */
	if (pp->bridge->child_ops == &dw_child_pcie_ops) {
		int atu_idx = 0;
		struct resource_entry *entry;

		/* Get last memory resource entry */
		resource_list_for_each_entry(entry, &pp->bridge->windows) {
			if (resource_type(entry->res) != IORESOURCE_MEM)
				continue;

			if (pci->num_viewport <= ++atu_idx)
				break;

			dw_pcie_prog_outbound_atu(pci, atu_idx,
						  PCIE_ATU_TYPE_MEM, entry->res->start,
						  entry->res->start - entry->offset,
						  resource_size(entry->res));
		}

		if (pp->io_size) {
			if (pci->num_viewport > ++atu_idx)
				dw_pcie_prog_outbound_atu(pci, atu_idx,
							  PCIE_ATU_TYPE_IO, pp->io_base,
							  pp->io_bus_addr, pp->io_size);
			else
				pci->io_cfg_atu_shared = true;
		}

		if (pci->num_viewport <= atu_idx)
			dev_warn(pci->dev, "Resources exceed number of ATU entries (%d)\n",
				 pci->num_viewport);
	}

	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);

	/* Program correct class for RC */
	dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);

	val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
	val |= PORT_LOGIC_SPEED_CHANGE;
	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);

	dw_pcie_dbi_ro_wr_dis(pci);
}
EXPORT_SYMBOL_GPL(dw_pcie_setup_rc);