// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe driver for Renesas R-Car SoCs
 *  Copyright (C) 2014-2020 Renesas Electronics Europe Ltd
 *
 * Based on:
 *  arch/sh/drivers/pci/pcie-sh7786.c
 *  arch/sh/drivers/pci/ops-sh7786.c
 *  Copyright (C) 2009 - 2011 Paul Mundt
 *
 * Author: Phil Edworthy <phil.edworthy@renesas.com>
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include "pcie-rcar.h"

struct rcar_msi {
	DECLARE_BITMAP(used, INT_PCI_MSI_NR);
	struct irq_domain *domain;
	struct msi_controller chip;
	unsigned long pages;
	struct mutex lock;
	int irq1;
	int irq2;
};

static inline struct rcar_msi *to_rcar_msi(struct msi_controller *chip)
{
	return container_of(chip, struct rcar_msi, chip);
}

/* Structure representing the PCIe interface */
struct rcar_pcie_host {
	struct rcar_pcie	pcie;
	struct device		*dev;
	struct phy		*phy;
	void __iomem		*base;
	struct clk		*bus_clk;
	struct rcar_msi		msi;
	int			(*phy_init_fn)(struct rcar_pcie_host *host);
};

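/*
 * Read the 32-bit word containing the byte at config offset 'where' and
 * shift it so that the addressed byte ends up in the least significant
 * bits.
 */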
static u32 rcar_read_conf(struct rcar_pcie *pcie, int where)
{
	unsigned int shift = BITS_PER_BYTE * (where & 3);
	u32 val = rcar_pci_read_reg(pcie, where & ~3);

	return val >> shift;
}

/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
static int rcar_pcie_config_access(struct rcar_pcie_host *host,
		unsigned char access_type, struct pci_bus *bus,
		unsigned int devfn, int where, u32 *data)
{
	struct rcar_pcie *pcie = &host->pcie;
	unsigned int dev, func, reg, index;

	dev = PCI_SLOT(devfn);
	func = PCI_FUNC(devfn);
	reg = where & ~3;
	index = reg / 4;

	/*
	 * While each channel has its own memory-mapped extended config
	 * space, it's generally only accessible when in endpoint mode.
	 * When in root complex mode, the controller is unable to target
	 * itself with either type 0 or type 1 accesses, and indeed, any
	 * controller-initiated target transfer to its own config space
	 * results in a completer abort.
	 *
	 * Each channel effectively only supports a single device, but as
	 * the same channel <-> device access works for any PCI_SLOT()
	 * value, we cheat a bit here and bind the controller's config
	 * space to devfn 0 in order to enable self-enumeration. In this
	 * case the regular ECAR/ECDR path is sidelined and the mangled
	 * config access itself is initiated as an internal bus transaction.
	 */
	if (pci_is_root_bus(bus)) {
		if (dev != 0)
			return PCIBIOS_DEVICE_NOT_FOUND;

		if (access_type == RCAR_PCI_ACCESS_READ)
			*data = rcar_pci_read_reg(pcie, PCICONF(index));
		else
			rcar_pci_write_reg(pcie, *data, PCICONF(index));

		return PCIBIOS_SUCCESSFUL;
	}

	/* Clear errors */
	rcar_pci_write_reg(pcie, rcar_pci_read_reg(pcie, PCIEERRFR), PCIEERRFR);

	/* Set the PIO address */
	rcar_pci_write_reg(pcie, PCIE_CONF_BUS(bus->number) |
		PCIE_CONF_DEV(dev) | PCIE_CONF_FUNC(func) | reg, PCIECAR);

	/* Enable the configuration access */
	if (pci_is_root_bus(bus->parent))
		rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE0, PCIECCTLR);
	else
		rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE1, PCIECCTLR);

	/* Check for errors */
	if (rcar_pci_read_reg(pcie, PCIEERRFR) & UNSUPPORTED_REQUEST)
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* Check for master and target aborts */
	if (rcar_read_conf(pcie, RCONF(PCI_STATUS)) &
	    (PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT))
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (access_type == RCAR_PCI_ACCESS_READ)
		*data = rcar_pci_read_reg(pcie, PCIECDR);
	else
		rcar_pci_write_reg(pcie, *data, PCIECDR);

	/* Disable the configuration access */
	rcar_pci_write_reg(pcie, 0, PCIECCTLR);

	return PCIBIOS_SUCCESSFUL;
}

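/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */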
static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
			       int where, int size, u32 *val)
{
	struct rcar_pcie_host *host = bus->sysdata;
	int ret;

	ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_READ,
				      bus, devfn, where, val);
	if (ret != PCIBIOS_SUCCESSFUL) {
		*val = 0xffffffff;
		return ret;
	}

	if (size == 1)
		*val = (*val >> (BITS_PER_BYTE * (where & 3))) & 0xff;
	else if (size == 2)
		*val = (*val >> (BITS_PER_BYTE * (where & 2))) & 0xffff;

	dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n",
		bus->number, devfn, where, size, *val);

	return ret;
}

/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
				int where, int size, u32 val)
{
	struct rcar_pcie_host *host = bus->sysdata;
	unsigned int shift;
	u32 data;
	int ret;

	ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_READ,
				      bus, devfn, where, &data);
	if (ret != PCIBIOS_SUCCESSFUL)
		return ret;

	dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n",
		bus->number, devfn, where, size, val);

	if (size == 1) {
		shift = BITS_PER_BYTE * (where & 3);
		data &= ~(0xff << shift);
		data |= ((val & 0xff) << shift);
	} else if (size == 2) {
		shift = BITS_PER_BYTE * (where & 2);
		data &= ~(0xffff << shift);
		data |= ((val & 0xffff) << shift);
	} else {
		data = val;
	}

	ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_WRITE,
				      bus, devfn, where, &data);

	return ret;
}

static struct pci_ops rcar_pcie_ops = {
	.read	= rcar_pcie_read_conf,
	.write	= rcar_pcie_write_conf,
};

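/*
 * Try to train the link from 2.5 GT/s up to 5 GT/s when the MAC reports
 * 5 GT/s capability, then report the resulting link speed.
 */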
static void rcar_pcie_force_speedup(struct rcar_pcie *pcie)
{
	struct device *dev = pcie->dev;
	unsigned int timeout = 1000;
	u32 macsr;

	if ((rcar_pci_read_reg(pcie, MACS2R) & LINK_SPEED) != LINK_SPEED_5_0GTS)
		return;

	if (rcar_pci_read_reg(pcie, MACCTLR) & SPEED_CHANGE) {
		dev_err(dev, "Speed change already in progress\n");
		return;
	}

	macsr = rcar_pci_read_reg(pcie, MACSR);
	if ((macsr & LINK_SPEED) == LINK_SPEED_5_0GTS)
		goto done;

	/* Set target link speed to 5.0 GT/s */
	rcar_rmw32(pcie, EXPCAP(12), PCI_EXP_LNKSTA_CLS,
		   PCI_EXP_LNKSTA_CLS_5_0GB);

	/* Set speed change reason as intentional factor */
	rcar_rmw32(pcie, MACCGSPSETR, SPCNGRSN, 0);

	/* Clear SPCHGFIN, SPCHGSUC, and SPCHGFAIL */
	if (macsr & (SPCHGFIN | SPCHGSUC | SPCHGFAIL))
		rcar_pci_write_reg(pcie, macsr, MACSR);

	/* Start link speed change */
	rcar_rmw32(pcie, MACCTLR, SPEED_CHANGE, SPEED_CHANGE);

	while (timeout--) {
		macsr = rcar_pci_read_reg(pcie, MACSR);
		if (macsr & SPCHGFIN) {
			/* Clear the interrupt bits */
			rcar_pci_write_reg(pcie, macsr, MACSR);

			if (macsr & SPCHGFAIL)
				dev_err(dev, "Speed change failed\n");

			goto done;
		}

		msleep(1);
	}

	dev_err(dev, "Speed change timed out\n");

done:
	dev_info(dev, "Current link speed is %s GT/s\n",
		 (macsr & LINK_SPEED) == LINK_SPEED_5_0GTS ? "5" : "2.5");
}

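/*
 * Program the outbound windows from the host bridge resource list and
 * attempt the higher link speed.
 */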
static void rcar_pcie_hw_enable(struct rcar_pcie_host *host)
{
	struct rcar_pcie *pcie = &host->pcie;
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);
	struct resource_entry *win;
	LIST_HEAD(res);
	int i = 0;

	/* Try setting 5 GT/s link speed */
	rcar_pcie_force_speedup(pcie);

	/* Setup PCI resources */
	resource_list_for_each_entry(win, &bridge->windows) {
		struct resource *res = win->res;

		if (!res->flags)
			continue;

		switch (resource_type(res)) {
		case IORESOURCE_IO:
		case IORESOURCE_MEM:
			rcar_pcie_set_outbound(pcie, i, win);
			i++;
			break;
		}
	}
}

static int rcar_pcie_enable(struct rcar_pcie_host *host)
{
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);

	rcar_pcie_hw_enable(host);

	pci_add_flags(PCI_REASSIGN_ALL_BUS);

	bridge->sysdata = host;
	bridge->ops = &rcar_pcie_ops;
	if (IS_ENABLED(CONFIG_PCI_MSI))
		bridge->msi = &host->msi.chip;

	return pci_host_probe(bridge);
}

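/* Poll H1_PCIEPHYADRR until the PHY acknowledges the previous command */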
static int phy_wait_for_ack(struct rcar_pcie *pcie)
{
	struct device *dev = pcie->dev;
	unsigned int timeout = 100;

	while (timeout--) {
		if (rcar_pci_read_reg(pcie, H1_PCIEPHYADRR) & PHY_ACK)
			return 0;

		udelay(100);
	}

	dev_err(dev, "Access to PCIe phy timed out\n");

	return -ETIMEDOUT;
}

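/*
 * Issue a single write command to the H1 PCIe PHY through the
 * H1_PCIEPHYDOUTR/H1_PCIEPHYADRR register pair.
 */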
static void phy_write_reg(struct rcar_pcie *pcie,
			  unsigned int rate, u32 addr,
			  unsigned int lane, u32 data)
{
	u32 phyaddr;

	phyaddr = WRITE_CMD |
		((rate & 1) << RATE_POS) |
		((lane & 0xf) << LANE_POS) |
		((addr & 0xff) << ADR_POS);

	/* Set write data */
	rcar_pci_write_reg(pcie, data, H1_PCIEPHYDOUTR);
	rcar_pci_write_reg(pcie, phyaddr, H1_PCIEPHYADRR);

	/* Ignore errors as they will be dealt with if the data link is down */
	phy_wait_for_ack(pcie);

	/* Clear command */
	rcar_pci_write_reg(pcie, 0, H1_PCIEPHYDOUTR);
	rcar_pci_write_reg(pcie, 0, H1_PCIEPHYADRR);

	/* Ignore errors as they will be dealt with if the data link is down */
	phy_wait_for_ack(pcie);
}

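/*
 * Reset the controller, wait for the PHY and data link to come up, and
 * set up the root port configuration space and interrupts.
 */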
static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
{
	int err;

	/* Begin initialization */
	rcar_pci_write_reg(pcie, 0, PCIETCTLR);

	/* Set mode */
	rcar_pci_write_reg(pcie, 1, PCIEMSR);

	err = rcar_pcie_wait_for_phyrdy(pcie);
	if (err)
		return err;

	/*
	 * Initial header for port config space is type 1, set the device
	 * class to match. Hardware takes care of propagating the IDSETR
	 * settings, so there is no need to bother with a quirk.
	 */
	rcar_pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI << 16, IDSETR1);

	/*
	 * Set up the Secondary Bus Number and Subordinate Bus Number, even
	 * though they aren't used, to avoid the bridge being detected as
	 * broken.
	 */
	rcar_rmw32(pcie, RCONF(PCI_SECONDARY_BUS), 0xff, 1);
	rcar_rmw32(pcie, RCONF(PCI_SUBORDINATE_BUS), 0xff, 1);

	/* Initialize default capabilities. */
	rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP);
	rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS),
		   PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ROOT_PORT << 4);
	rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f,
		   PCI_HEADER_TYPE_BRIDGE);

	/* Enable data link layer active state reporting */
	rcar_rmw32(pcie, REXPCAP(PCI_EXP_LNKCAP), PCI_EXP_LNKCAP_DLLLARC,
		   PCI_EXP_LNKCAP_DLLLARC);

	/* Write out the physical slot number = 0 */
	rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0);

	/* Set the completion timer timeout to the maximum 50ms. */
	rcar_rmw32(pcie, TLCTLR + 1, 0x3f, 50);

	/* Terminate list of capabilities (Next Capability Offset=0) */
	rcar_rmw32(pcie, RVCCAP(0), 0xfff00000, 0);

	/* Enable MSI */
	if (IS_ENABLED(CONFIG_PCI_MSI))
		rcar_pci_write_reg(pcie, 0x801f0000, PCIEMSITXR);

	rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR);

	/* Finish initialization - establish a PCI Express link */
	rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);

	/* This will timeout if we don't have a link. */
	err = rcar_pcie_wait_for_dl(pcie);
	if (err)
		return err;

	/* Enable INTx interrupts */
	rcar_rmw32(pcie, PCIEINTXR, 0, 0xF << 8);

	wmb();

	return 0;
}

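/*
 * PHY initialization for R-Car H1. The register values below are
 * hardware-specific settings carried over from the original setup code.
 */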
static int rcar_pcie_phy_init_h1(struct rcar_pcie_host *host)
{
	struct rcar_pcie *pcie = &host->pcie;

	/* Initialize the phy */
	phy_write_reg(pcie, 0, 0x42, 0x1, 0x0EC34191);
	phy_write_reg(pcie, 1, 0x42, 0x1, 0x0EC34180);
	phy_write_reg(pcie, 0, 0x43, 0x1, 0x00210188);
	phy_write_reg(pcie, 1, 0x43, 0x1, 0x00210188);
	phy_write_reg(pcie, 0, 0x44, 0x1, 0x015C0014);
	phy_write_reg(pcie, 1, 0x44, 0x1, 0x015C0014);
	phy_write_reg(pcie, 1, 0x4C, 0x1, 0x786174A0);
	phy_write_reg(pcie, 1, 0x4D, 0x1, 0x048000BB);
	phy_write_reg(pcie, 0, 0x51, 0x1, 0x079EC062);
	phy_write_reg(pcie, 0, 0x52, 0x1, 0x20000000);
	phy_write_reg(pcie, 1, 0x52, 0x1, 0x20000000);
	phy_write_reg(pcie, 1, 0x56, 0x1, 0x00003806);

	phy_write_reg(pcie, 0, 0x60, 0x1, 0x004B03A5);
	phy_write_reg(pcie, 0, 0x64, 0x1, 0x3F0F1F0F);
	phy_write_reg(pcie, 0, 0x66, 0x1, 0x00008000);

	return 0;
}

static int rcar_pcie_phy_init_gen2(struct rcar_pcie_host *host)
{
	struct rcar_pcie *pcie = &host->pcie;

	/*
	 * These settings come from the R-Car Series, 2nd Generation User's
	 * Manual, section 50.3.1 (2) Initialization of the physical layer.
	 */
	rcar_pci_write_reg(pcie, 0x000f0030, GEN2_PCIEPHYADDR);
	rcar_pci_write_reg(pcie, 0x00381203, GEN2_PCIEPHYDATA);
	rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
	rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);

	rcar_pci_write_reg(pcie, 0x000f0054, GEN2_PCIEPHYADDR);
	/*
	 * The following value is for a DC-coupled connection with no
	 * termination resistor.
	 */
	rcar_pci_write_reg(pcie, 0x13802007, GEN2_PCIEPHYDATA);
	rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
	rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);

	return 0;
}

static int rcar_pcie_phy_init_gen3(struct rcar_pcie_host *host)
{
	int err;

	err = phy_init(host->phy);
	if (err)
		return err;

	err = phy_power_on(host->phy);
	if (err)
		phy_exit(host->phy);

	return err;
}

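/* Allocate the lowest free MSI vector from the bitmap, or -ENOSPC */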
static int rcar_msi_alloc(struct rcar_msi *chip)
{
	int msi;

	mutex_lock(&chip->lock);

	msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
	if (msi < INT_PCI_MSI_NR)
		set_bit(msi, chip->used);
	else
		msi = -ENOSPC;

	mutex_unlock(&chip->lock);

	return msi;
}

static int rcar_msi_alloc_region(struct rcar_msi *chip, int no_irqs)
{
	int msi;

	mutex_lock(&chip->lock);
	msi = bitmap_find_free_region(chip->used, INT_PCI_MSI_NR,
				      order_base_2(no_irqs));
	mutex_unlock(&chip->lock);

	return msi;
}

static void rcar_msi_free(struct rcar_msi *chip, unsigned long irq)
{
	mutex_lock(&chip->lock);
	clear_bit(irq, chip->used);
	mutex_unlock(&chip->lock);
}

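/*
 * Handler shared by both MSI IRQ lines: dispatch every vector flagged
 * as pending in PCIEMSIFR to its mapped Linux interrupt.
 */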
static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
{
	struct rcar_pcie_host *host = data;
	struct rcar_pcie *pcie = &host->pcie;
	struct rcar_msi *msi = &host->msi;
	struct device *dev = pcie->dev;
	unsigned long reg;

	reg = rcar_pci_read_reg(pcie, PCIEMSIFR);

	/* MSI & INTx share an interrupt - we only handle MSI here */
	if (!reg)
		return IRQ_NONE;

	while (reg) {
		unsigned int index = find_first_bit(&reg, 32);
		unsigned int msi_irq;

		/* clear the interrupt */
		rcar_pci_write_reg(pcie, 1 << index, PCIEMSIFR);

		msi_irq = irq_find_mapping(msi->domain, index);
		if (msi_irq) {
			if (test_bit(index, msi->used))
				generic_handle_irq(msi_irq);
			else
				dev_info(dev, "unhandled MSI\n");
		} else {
			/* Unknown MSI, just clear it */
			dev_dbg(dev, "unexpected MSI\n");
		}

		/* see if there's any more pending in this vector */
		reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
	}

	return IRQ_HANDLED;
}

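/* msi_controller callback: allocate and program a single MSI vector */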
static int rcar_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
			      struct msi_desc *desc)
{
	struct rcar_msi *msi = to_rcar_msi(chip);
	struct rcar_pcie_host *host = container_of(chip, struct rcar_pcie_host,
						   msi.chip);
	struct rcar_pcie *pcie = &host->pcie;
	struct msi_msg msg;
	unsigned int irq;
	int hwirq;

	hwirq = rcar_msi_alloc(msi);
	if (hwirq < 0)
		return hwirq;

	irq = irq_find_mapping(msi->domain, hwirq);
	if (!irq) {
		rcar_msi_free(msi, hwirq);
		return -EINVAL;
	}

	irq_set_msi_desc(irq, desc);

	msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
	msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
	msg.data = hwirq;

	pci_write_msi_msg(irq, &msg);

	return 0;
}

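/* msi_controller callback: allocate a power-of-two block of MSI vectors */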
static int rcar_msi_setup_irqs(struct msi_controller *chip,
			       struct pci_dev *pdev, int nvec, int type)
{
	struct rcar_msi *msi = to_rcar_msi(chip);
	struct rcar_pcie_host *host = container_of(chip, struct rcar_pcie_host,
						   msi.chip);
	struct rcar_pcie *pcie = &host->pcie;
	struct msi_desc *desc;
	struct msi_msg msg;
	unsigned int irq;
	int hwirq;
	int i;

	/* MSI-X interrupts are not supported */
	if (type == PCI_CAP_ID_MSIX)
		return -EINVAL;

	WARN_ON(!list_is_singular(&pdev->dev.msi_list));
	desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list);

	hwirq = rcar_msi_alloc_region(msi, nvec);
	if (hwirq < 0)
		return -ENOSPC;

	irq = irq_find_mapping(msi->domain, hwirq);
	if (!irq)
		return -ENOSPC;

	for (i = 0; i < nvec; i++) {
		/*
		 * irq_create_mapping() called from rcar_pcie_probe() pre-
		 * allocates descs, so there is no need to allocate descs here.
		 * We can therefore assume that if irq_find_mapping() above
		 * returns non-zero, then the descs are also successfully
		 * allocated.
		 */
		if (irq_set_msi_desc_off(irq, i, desc)) {
			/* TODO: clear */
			return -EINVAL;
		}
	}

	desc->nvec_used = nvec;
	desc->msi_attrib.multiple = order_base_2(nvec);

	msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
	msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
	msg.data = hwirq;

	pci_write_msi_msg(irq, &msg);

	return 0;
}

static void rcar_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
{
	struct rcar_msi *msi = to_rcar_msi(chip);
	struct irq_data *d = irq_get_irq_data(irq);

	rcar_msi_free(msi, d->hwirq);
}

static struct irq_chip rcar_msi_irq_chip = {
	.name = "R-Car PCIe MSI",
	.irq_enable = pci_msi_unmask_irq,
	.irq_disable = pci_msi_mask_irq,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};

static int rcar_msi_map(struct irq_domain *domain, unsigned int irq,
			irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &rcar_msi_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops msi_domain_ops = {
	.map = rcar_msi_map,
};

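/* Dispose of all MSI IRQ mappings and remove the MSI IRQ domain */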
static void rcar_pcie_unmap_msi(struct rcar_pcie_host *host)
{
	struct rcar_msi *msi = &host->msi;
	int i, irq;

	for (i = 0; i < INT_PCI_MSI_NR; i++) {
		irq = irq_find_mapping(msi->domain, i);
		if (irq > 0)
			irq_dispose_mapping(irq);
	}

	irq_domain_remove(msi->domain);
}

static void rcar_pcie_hw_enable_msi(struct rcar_pcie_host *host)
{
	struct rcar_pcie *pcie = &host->pcie;
	struct rcar_msi *msi = &host->msi;
	unsigned long base;

	/* setup MSI data target */
	base = virt_to_phys((void *)msi->pages);

	rcar_pci_write_reg(pcie, lower_32_bits(base) | MSIFE, PCIEMSIALR);
	rcar_pci_write_reg(pcie, upper_32_bits(base), PCIEMSIAUR);

	/* enable all MSI interrupts */
	rcar_pci_write_reg(pcie, 0xffffffff, PCIEMSIIER);
}

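/*
 * Set up the MSI controller: create the IRQ domain and its mappings,
 * request the two shared IRQ lines and point the hardware at the MSI
 * data target page.
 */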
static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)
{
	struct rcar_pcie *pcie = &host->pcie;
	struct device *dev = pcie->dev;
	struct rcar_msi *msi = &host->msi;
	int err, i;

	mutex_init(&msi->lock);

	msi->chip.dev = dev;
	msi->chip.setup_irq = rcar_msi_setup_irq;
	msi->chip.setup_irqs = rcar_msi_setup_irqs;
	msi->chip.teardown_irq = rcar_msi_teardown_irq;

	msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR,
					    &msi_domain_ops, &msi->chip);
	if (!msi->domain) {
		dev_err(dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}

	for (i = 0; i < INT_PCI_MSI_NR; i++)
		irq_create_mapping(msi->domain, i);

	/* Two irqs are for MSI, but they are also used for non-MSI irqs */
	err = devm_request_irq(dev, msi->irq1, rcar_pcie_msi_irq,
			       IRQF_SHARED | IRQF_NO_THREAD,
			       rcar_msi_irq_chip.name, host);
	if (err < 0) {
		dev_err(dev, "failed to request IRQ: %d\n", err);
		goto err;
	}

	err = devm_request_irq(dev, msi->irq2, rcar_pcie_msi_irq,
			       IRQF_SHARED | IRQF_NO_THREAD,
			       rcar_msi_irq_chip.name, host);
	if (err < 0) {
		dev_err(dev, "failed to request IRQ: %d\n", err);
		goto err;
	}

	/* setup MSI data target; bail out if the page allocation fails */
	msi->pages = __get_free_pages(GFP_KERNEL | GFP_DMA32, 0);
	if (!msi->pages) {
		err = -ENOMEM;
		goto err;
	}
	rcar_pcie_hw_enable_msi(host);

	return 0;

err:
	rcar_pcie_unmap_msi(host);
	return err;
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) static void rcar_pcie_teardown_msi(struct rcar_pcie_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) struct rcar_pcie *pcie = &host->pcie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) struct rcar_msi *msi = &host->msi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) /* Disable all MSI interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) rcar_pci_write_reg(pcie, 0, PCIEMSIIER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) /* Disable address decoding of the MSI interrupt, MSIFE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) rcar_pci_write_reg(pcie, 0, PCIEMSIALR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) free_pages(msi->pages, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) rcar_pcie_unmap_msi(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) static int rcar_pcie_get_resources(struct rcar_pcie_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) struct rcar_pcie *pcie = &host->pcie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) struct device *dev = pcie->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) struct resource res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) int err, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) host->phy = devm_phy_optional_get(dev, "pcie");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) if (IS_ERR(host->phy))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) return PTR_ERR(host->phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) err = of_address_to_resource(dev->of_node, 0, &res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) pcie->base = devm_ioremap_resource(dev, &res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) if (IS_ERR(pcie->base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) return PTR_ERR(pcie->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) host->bus_clk = devm_clk_get(dev, "pcie_bus");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) if (IS_ERR(host->bus_clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) dev_err(dev, "cannot get pcie bus clock\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) return PTR_ERR(host->bus_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) }

	i = irq_of_parse_and_map(dev->of_node, 0);
	if (!i) {
		dev_err(dev, "cannot get platform resources for MSI interrupt 1\n");
		err = -ENOENT;
		goto err_irq1;
	}
	host->msi.irq1 = i;

	i = irq_of_parse_and_map(dev->of_node, 1);
	if (!i) {
		dev_err(dev, "cannot get platform resources for MSI interrupt 2\n");
		err = -ENOENT;
		goto err_irq2;
	}
	host->msi.irq2 = i;

	return 0;

err_irq2:
	irq_dispose_mapping(host->msi.irq1);
err_irq1:
	return err;
}
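
/*
 * Illustrative sketch (not part of this file): the device-tree node is
 * expected to provide the register window, a "pcie_bus" clock, an
 * optional "pcie" PHY, and at least two interrupts, roughly:
 *
 *	pcie: pcie@fe000000 {
 *		compatible = "renesas,pcie-r8a7791", "renesas,pcie-rcar-gen2";
 *		reg = <0 0xfe000000 0 0x80000>;
 *		interrupts = <...>, <...>, <...>;
 *		clocks = <...>, <&pcie_bus_clk>;
 *		clock-names = "pcie", "pcie_bus";
 *	};
 *
 * Exact values are board-specific; see the R-Car PCIe DT binding for
 * the authoritative description.
 */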

static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
				    struct resource_entry *entry,
				    int *index)
{
	u64 restype = entry->res->flags;
	u64 cpu_addr = entry->res->start;
	u64 cpu_end = entry->res->end;
	u64 pci_addr = entry->res->start - entry->offset;
	u32 flags = LAM_64BIT | LAR_ENABLE;
	u64 mask;
	u64 size = resource_size(entry->res);
	int idx = *index;

	if (restype & IORESOURCE_PREFETCH)
		flags |= LAM_PREFETCH;

	while (cpu_addr < cpu_end) {
		if (idx >= MAX_NR_INBOUND_MAPS - 1) {
			dev_err(pcie->dev, "Failed to map inbound regions!\n");
			return -EINVAL;
		}
		/*
		 * If the size of the range is larger than the alignment of
		 * the start address, we have to use multiple entries to
		 * perform the mapping.
		 */
		if (cpu_addr > 0) {
			unsigned long nr_zeros = __ffs64(cpu_addr);
			u64 alignment = 1ULL << nr_zeros;

			size = min(size, alignment);
		}
		/* Hardware supports max 4GiB inbound region */
		size = min(size, 1ULL << 32);

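		/*
		 * LAMR takes a size mask in its upper bits; the low
		 * nibble is kept clear for the LAM_64BIT/LAM_PREFETCH/
		 * LAR_ENABLE flag bits OR'd in below.
		 */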
		mask = roundup_pow_of_two(size) - 1;
		mask &= ~0xf;

		rcar_pcie_set_inbound(pcie, cpu_addr, pci_addr,
				      lower_32_bits(mask) | flags, idx, true);

		pci_addr += size;
		cpu_addr += size;
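		/*
		 * Each 64-bit window occupies two consecutive register
		 * slots (idx holds the lower halves, idx + 1 the upper
		 * halves), so advance the index by two.
		 */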
		idx += 2;
	}
	*index = idx;

	return 0;
}
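
/*
 * Worked example (illustrative, not from the original code): a dma-range
 * of 2 GiB at CPU address 0x40000000 is only 1 GiB aligned, so the loop
 * above splits it into two 1 GiB windows at indices 0 and 2; the same
 * size range starting at a 4 GiB-aligned address fits a single window.
 */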
static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie_host *host)
{
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);
	struct resource_entry *entry;
	int index = 0, err = 0;

	resource_list_for_each_entry(entry, &bridge->dma_ranges) {
		err = rcar_pcie_inbound_ranges(&host->pcie, entry, &index);
		if (err)
			break;
	}

	return err;
}

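/* The .data of each match entry selects the SoC-specific PHY setup. */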
static const struct of_device_id rcar_pcie_of_match[] = {
	{ .compatible = "renesas,pcie-r8a7779",
	  .data = rcar_pcie_phy_init_h1 },
	{ .compatible = "renesas,pcie-r8a7790",
	  .data = rcar_pcie_phy_init_gen2 },
	{ .compatible = "renesas,pcie-r8a7791",
	  .data = rcar_pcie_phy_init_gen2 },
	{ .compatible = "renesas,pcie-rcar-gen2",
	  .data = rcar_pcie_phy_init_gen2 },
	{ .compatible = "renesas,pcie-r8a7795",
	  .data = rcar_pcie_phy_init_gen3 },
	{ .compatible = "renesas,pcie-rcar-gen3",
	  .data = rcar_pcie_phy_init_gen3 },
	{},
};

static int rcar_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rcar_pcie_host *host;
	struct rcar_pcie *pcie;
	u32 data;
	int err;
	struct pci_host_bridge *bridge;

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*host));
	if (!bridge)
		return -ENOMEM;

	host = pci_host_bridge_priv(bridge);
	pcie = &host->pcie;
	pcie->dev = dev;
	platform_set_drvdata(pdev, host);

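	/*
	 * pm_runtime_get_sync() takes a usage reference even when it
	 * fails, so the error path must still drop it via err_pm_put.
	 */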
	pm_runtime_enable(pcie->dev);
	err = pm_runtime_get_sync(pcie->dev);
	if (err < 0) {
		dev_err(pcie->dev, "pm_runtime_get_sync failed\n");
		goto err_pm_put;
	}

	err = rcar_pcie_get_resources(host);
	if (err < 0) {
		dev_err(dev, "failed to request resources: %d\n", err);
		goto err_pm_put;
	}

	err = clk_prepare_enable(host->bus_clk);
	if (err) {
		dev_err(dev, "failed to enable bus clock: %d\n", err);
		goto err_unmap_msi_irqs;
	}

	err = rcar_pcie_parse_map_dma_ranges(host);
	if (err)
		goto err_clk_disable;

	host->phy_init_fn = of_device_get_match_data(dev);
	err = host->phy_init_fn(host);
	if (err) {
		dev_err(dev, "failed to init PCIe PHY\n");
		goto err_clk_disable;
	}

	/* Failure to get a link might just be that no cards are inserted */
	if (rcar_pcie_hw_init(pcie)) {
		dev_info(dev, "PCIe link down\n");
		err = -ENODEV;
		goto err_phy_shutdown;
	}

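	/* MACSR[25:20] reports the number of lanes the link trained to. */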
	data = rcar_pci_read_reg(pcie, MACSR);
	dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		err = rcar_pcie_enable_msi(host);
		if (err < 0) {
			dev_err(dev, "failed to enable MSI support: %d\n", err);
			goto err_phy_shutdown;
		}
	}

	err = rcar_pcie_enable(host);
	if (err)
		goto err_msi_teardown;

	return 0;

err_msi_teardown:
	if (IS_ENABLED(CONFIG_PCI_MSI))
		rcar_pcie_teardown_msi(host);

err_phy_shutdown:
	if (host->phy) {
		phy_power_off(host->phy);
		phy_exit(host->phy);
	}

err_clk_disable:
	clk_disable_unprepare(host->bus_clk);

err_unmap_msi_irqs:
	irq_dispose_mapping(host->msi.irq2);
	irq_dispose_mapping(host->msi.irq1);

err_pm_put:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);

	return err;
}

static int __maybe_unused rcar_pcie_resume(struct device *dev)
{
	struct rcar_pcie_host *host = dev_get_drvdata(dev);
	struct rcar_pcie *pcie = &host->pcie;
	unsigned int data;
	int err;

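	/*
	 * Reprogram the inbound windows, which do not survive suspend.
	 * Errors are not propagated so that system resume itself does
	 * not fail; the controller is simply left unconfigured.
	 */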
	err = rcar_pcie_parse_map_dma_ranges(host);
	if (err)
		return 0;

	/* Failure to get a link might just be that no cards are inserted */
	err = host->phy_init_fn(host);
	if (err) {
		dev_info(dev, "PCIe link down\n");
		return 0;
	}

	data = rcar_pci_read_reg(pcie, MACSR);
	dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);

	/* Enable MSI */
	if (IS_ENABLED(CONFIG_PCI_MSI))
		rcar_pcie_hw_enable_msi(host);

	rcar_pcie_hw_enable(host);

	return 0;
}

static int rcar_pcie_resume_noirq(struct device *dev)
{
	struct rcar_pcie_host *host = dev_get_drvdata(dev);
	struct rcar_pcie *pcie = &host->pcie;

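	/*
	 * If power management state was retained (PMSR non-zero) and the
	 * data link layer is not flagged down, the link survived suspend
	 * and nothing needs to be re-established.
	 */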
	if (rcar_pci_read_reg(pcie, PMSR) &&
	    !(rcar_pci_read_reg(pcie, PCIETCTLR) & DL_DOWN))
		return 0;

	/* Re-establish the PCIe link */
	rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR);
	rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
	return rcar_pcie_wait_for_dl(pcie);
}

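/*
 * Only the resume leg of the noirq phase is needed here; assigning
 * .resume_noirq directly avoids installing a matching suspend_noirq
 * handler, which SET_NOIRQ_SYSTEM_SLEEP_PM_OPS() would do.
 */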
static const struct dev_pm_ops rcar_pcie_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(NULL, rcar_pcie_resume)
	.resume_noirq = rcar_pcie_resume_noirq,
};

static struct platform_driver rcar_pcie_driver = {
	.driver = {
		.name = "rcar-pcie",
		.of_match_table = rcar_pcie_of_match,
		.pm = &rcar_pcie_pm_ops,
		.suppress_bind_attrs = true,
	},
	.probe = rcar_pcie_probe,
};
builtin_platform_driver(rcar_pcie_driver);