^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * MediaTek PCIe host controller driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (c) 2017 MediaTek Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Author: Ryder Lee <ryder.lee@mediatek.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Honghui Zhang <honghui.zhang@mediatek.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/clk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/iopoll.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/irqchip/chained_irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/irqdomain.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/msi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/of_address.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/of_pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/of_platform.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/phy/phy.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <linux/pm_runtime.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <linux/reset.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include "../pci.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) /* PCIe shared registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #define PCIE_SYS_CFG 0x00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #define PCIE_INT_ENABLE 0x0c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #define PCIE_CFG_ADDR 0x20
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #define PCIE_CFG_DATA 0x24
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) /* PCIe per port registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #define PCIE_BAR0_SETUP 0x10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #define PCIE_CLASS 0x34
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #define PCIE_LINK_STATUS 0x50
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #define PCIE_PORT_INT_EN(x) BIT(20 + (x))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #define PCIE_PORT_PERST(x) BIT(1 + (x))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) #define PCIE_PORT_LINKUP BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) #define PCIE_BAR_MAP_MAX GENMASK(31, 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) #define PCIE_BAR_ENABLE BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) #define PCIE_REVISION_ID BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) #define PCIE_CLASS_CODE (0x60400 << 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) #define PCIE_CONF_REG(regn) (((regn) & GENMASK(7, 2)) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) ((((regn) >> 8) & GENMASK(3, 0)) << 24))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) #define PCIE_CONF_FUN(fun) (((fun) << 8) & GENMASK(10, 8))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) #define PCIE_CONF_DEV(dev) (((dev) << 11) & GENMASK(15, 11))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) #define PCIE_CONF_BUS(bus) (((bus) << 16) & GENMASK(23, 16))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) #define PCIE_CONF_ADDR(regn, fun, dev, bus) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) (PCIE_CONF_REG(regn) | PCIE_CONF_FUN(fun) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) PCIE_CONF_DEV(dev) | PCIE_CONF_BUS(bus))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) /* MediaTek specific configuration registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) #define PCIE_FTS_NUM 0x70c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) #define PCIE_FTS_NUM_MASK GENMASK(15, 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) #define PCIE_FTS_NUM_L0(x) ((x) & 0xff << 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) #define PCIE_FC_CREDIT 0x73c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) #define PCIE_FC_CREDIT_MASK (GENMASK(31, 31) | GENMASK(28, 16))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) #define PCIE_FC_CREDIT_VAL(x) ((x) << 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) /* PCIe V2 share registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) #define PCIE_SYS_CFG_V2 0x0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) #define PCIE_CSR_LTSSM_EN(x) BIT(0 + (x) * 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) #define PCIE_CSR_ASPM_L1_EN(x) BIT(1 + (x) * 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) /* PCIe V2 per-port registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) #define PCIE_MSI_VECTOR 0x0c0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) #define PCIE_CONF_VEND_ID 0x100
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) #define PCIE_CONF_DEVICE_ID 0x102
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) #define PCIE_CONF_CLASS_ID 0x106
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) #define PCIE_INT_MASK 0x420
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) #define INTX_MASK GENMASK(19, 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) #define INTX_SHIFT 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) #define PCIE_INT_STATUS 0x424
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) #define MSI_STATUS BIT(23)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) #define PCIE_IMSI_STATUS 0x42c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) #define PCIE_IMSI_ADDR 0x430
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) #define MSI_MASK BIT(23)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) #define MTK_MSI_IRQS_NUM 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) #define PCIE_AHB_TRANS_BASE0_L 0x438
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) #define PCIE_AHB_TRANS_BASE0_H 0x43c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) #define AHB2PCIE_SIZE(x) ((x) & GENMASK(4, 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) #define PCIE_AXI_WINDOW0 0x448
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) #define WIN_ENABLE BIT(7)
/*
 * Define the PCIe-to-AHB window size as 2^33 to support a maximum of 8GB of
 * address-space translation; this allows at least 4GB of DRAM to be accessed
 * by EP DMA (physical DRAM starts at 0x40000000).
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) #define PCIE2AHB_SIZE 0x21
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) /* PCIe V2 configuration transaction header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) #define PCIE_CFG_HEADER0 0x460
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) #define PCIE_CFG_HEADER1 0x464
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) #define PCIE_CFG_HEADER2 0x468
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) #define PCIE_CFG_WDATA 0x470
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) #define PCIE_APP_TLP_REQ 0x488
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) #define PCIE_CFG_RDATA 0x48c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) #define APP_CFG_REQ BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) #define APP_CPL_STATUS GENMASK(7, 5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) #define CFG_WRRD_TYPE_0 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) #define CFG_WR_FMT 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) #define CFG_RD_FMT 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) #define CFG_DW0_LENGTH(length) ((length) & GENMASK(9, 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) #define CFG_DW0_TYPE(type) (((type) << 24) & GENMASK(28, 24))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) #define CFG_DW0_FMT(fmt) (((fmt) << 29) & GENMASK(31, 29))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) #define CFG_DW2_REGN(regn) ((regn) & GENMASK(11, 2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) #define CFG_DW2_FUN(fun) (((fun) << 16) & GENMASK(18, 16))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) #define CFG_DW2_DEV(dev) (((dev) << 19) & GENMASK(23, 19))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) #define CFG_DW2_BUS(bus) (((bus) << 24) & GENMASK(31, 24))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) #define CFG_HEADER_DW0(type, fmt) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) (CFG_DW0_LENGTH(1) | CFG_DW0_TYPE(type) | CFG_DW0_FMT(fmt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) #define CFG_HEADER_DW1(where, size) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) (GENMASK(((size) - 1), 0) << ((where) & 0x3))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) #define CFG_HEADER_DW2(regn, fun, dev, bus) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) (CFG_DW2_REGN(regn) | CFG_DW2_FUN(fun) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) CFG_DW2_DEV(dev) | CFG_DW2_BUS(bus))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) #define PCIE_RST_CTRL 0x510
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) #define PCIE_PHY_RSTB BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) #define PCIE_PIPE_SRSTB BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) #define PCIE_MAC_SRSTB BIT(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) #define PCIE_CRSTB BIT(3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) #define PCIE_PERSTB BIT(8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) #define PCIE_LINKDOWN_RST_EN GENMASK(15, 13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) #define PCIE_LINK_STATUS_V2 0x804
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) #define PCIE_PORT_LINKUP_V2 BIT(10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) struct mtk_pcie_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) * struct mtk_pcie_soc - differentiate between host generations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) * @need_fix_class_id: whether this host's class ID needed to be fixed or not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) * @need_fix_device_id: whether this host's device ID needed to be fixed or not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) * @device_id: device ID which this host need to be fixed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) * @ops: pointer to configuration access functions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) * @startup: pointer to controller setting functions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) * @setup_irq: pointer to initialize IRQ functions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) struct mtk_pcie_soc {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) bool need_fix_class_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) bool need_fix_device_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) unsigned int device_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) struct pci_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) int (*startup)(struct mtk_pcie_port *port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) int (*setup_irq)(struct mtk_pcie_port *port, struct device_node *node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) * struct mtk_pcie_port - PCIe port information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) * @base: IO mapped register base
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) * @list: port list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) * @pcie: pointer to PCIe host info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) * @reset: pointer to port reset control
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) * @sys_ck: pointer to transaction/data link layer clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) * @ahb_ck: pointer to AHB slave interface operating clock for CSR access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) * and RC initiated MMIO access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) * @axi_ck: pointer to application layer MMIO channel operating clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) * @aux_ck: pointer to pe2_mac_bridge and pe2_mac_core operating clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) * when pcie_mac_ck/pcie_pipe_ck is turned off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) * @obff_ck: pointer to OBFF functional block operating clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) * @pipe_ck: pointer to LTSSM and PHY/MAC layer operating clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) * @phy: pointer to PHY control block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) * @slot: port slot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) * @irq: GIC irq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) * @irq_domain: legacy INTx IRQ domain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) * @inner_domain: inner IRQ domain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) * @msi_domain: MSI IRQ domain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) * @lock: protect the msi_irq_in_use bitmap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) * @msi_irq_in_use: bit map for assigned MSI IRQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) struct mtk_pcie_port {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) void __iomem *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) struct list_head list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) struct mtk_pcie *pcie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) struct reset_control *reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) struct clk *sys_ck;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) struct clk *ahb_ck;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) struct clk *axi_ck;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) struct clk *aux_ck;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) struct clk *obff_ck;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) struct clk *pipe_ck;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) struct phy *phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) u32 slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) int irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) struct irq_domain *irq_domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) struct irq_domain *inner_domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) struct irq_domain *msi_domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) struct mutex lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) DECLARE_BITMAP(msi_irq_in_use, MTK_MSI_IRQS_NUM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) * struct mtk_pcie - PCIe host information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) * @dev: pointer to PCIe device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) * @base: IO mapped register base
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) * @free_ck: free-run reference clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) * @mem: non-prefetchable memory resource
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) * @ports: pointer to PCIe port information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) * @soc: pointer to SoC-dependent operations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) struct mtk_pcie {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) void __iomem *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) struct clk *free_ck;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) struct list_head ports;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) const struct mtk_pcie_soc *soc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) static void mtk_pcie_subsys_powerdown(struct mtk_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) struct device *dev = pcie->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) clk_disable_unprepare(pcie->free_ck);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) pm_runtime_put_sync(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) pm_runtime_disable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) static void mtk_pcie_port_free(struct mtk_pcie_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) struct mtk_pcie *pcie = port->pcie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) struct device *dev = pcie->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) devm_iounmap(dev, port->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) list_del(&port->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) devm_kfree(dev, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) static void mtk_pcie_put_resources(struct mtk_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) struct mtk_pcie_port *port, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) phy_power_off(port->phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) phy_exit(port->phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) clk_disable_unprepare(port->pipe_ck);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) clk_disable_unprepare(port->obff_ck);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) clk_disable_unprepare(port->axi_ck);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) clk_disable_unprepare(port->aux_ck);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) clk_disable_unprepare(port->ahb_ck);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) clk_disable_unprepare(port->sys_ck);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) mtk_pcie_port_free(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) mtk_pcie_subsys_powerdown(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) static int mtk_pcie_check_cfg_cpld(struct mtk_pcie_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) err = readl_poll_timeout_atomic(port->base + PCIE_APP_TLP_REQ, val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) !(val & APP_CFG_REQ), 10,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) 100 * USEC_PER_MSEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) return PCIBIOS_SET_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) if (readl(port->base + PCIE_APP_TLP_REQ) & APP_CPL_STATUS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) return PCIBIOS_SET_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) return PCIBIOS_SUCCESSFUL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) static int mtk_pcie_hw_rd_cfg(struct mtk_pcie_port *port, u32 bus, u32 devfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) int where, int size, u32 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) /* Write PCIe configuration transaction header for Cfgrd */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) writel(CFG_HEADER_DW0(CFG_WRRD_TYPE_0, CFG_RD_FMT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) port->base + PCIE_CFG_HEADER0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) writel(CFG_HEADER_DW1(where, size), port->base + PCIE_CFG_HEADER1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) writel(CFG_HEADER_DW2(where, PCI_FUNC(devfn), PCI_SLOT(devfn), bus),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) port->base + PCIE_CFG_HEADER2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) /* Trigger h/w to transmit Cfgrd TLP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) tmp = readl(port->base + PCIE_APP_TLP_REQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) tmp |= APP_CFG_REQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) writel(tmp, port->base + PCIE_APP_TLP_REQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) /* Check completion status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) if (mtk_pcie_check_cfg_cpld(port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) return PCIBIOS_SET_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) /* Read cpld payload of Cfgrd */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) *val = readl(port->base + PCIE_CFG_RDATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) if (size == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) *val = (*val >> (8 * (where & 3))) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) else if (size == 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) *val = (*val >> (8 * (where & 3))) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) return PCIBIOS_SUCCESSFUL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) static int mtk_pcie_hw_wr_cfg(struct mtk_pcie_port *port, u32 bus, u32 devfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) int where, int size, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) /* Write PCIe configuration transaction header for Cfgwr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) writel(CFG_HEADER_DW0(CFG_WRRD_TYPE_0, CFG_WR_FMT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) port->base + PCIE_CFG_HEADER0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) writel(CFG_HEADER_DW1(where, size), port->base + PCIE_CFG_HEADER1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) writel(CFG_HEADER_DW2(where, PCI_FUNC(devfn), PCI_SLOT(devfn), bus),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) port->base + PCIE_CFG_HEADER2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) /* Write Cfgwr data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) val = val << 8 * (where & 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) writel(val, port->base + PCIE_CFG_WDATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) /* Trigger h/w to transmit Cfgwr TLP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) val = readl(port->base + PCIE_APP_TLP_REQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) val |= APP_CFG_REQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) writel(val, port->base + PCIE_APP_TLP_REQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) /* Check completion status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) return mtk_pcie_check_cfg_cpld(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) static struct mtk_pcie_port *mtk_pcie_find_port(struct pci_bus *bus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) unsigned int devfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) struct mtk_pcie *pcie = bus->sysdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) struct mtk_pcie_port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) struct pci_dev *dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) * Walk the bus hierarchy to get the devfn value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) * of the port in the root bus.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) while (bus && bus->number) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) dev = bus->self;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) bus = dev->bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) devfn = dev->devfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) list_for_each_entry(port, &pcie->ports, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) if (port->slot == PCI_SLOT(devfn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) return port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) static int mtk_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) int where, int size, u32 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) struct mtk_pcie_port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) u32 bn = bus->number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) port = mtk_pcie_find_port(bus, devfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) if (!port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) *val = ~0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) return PCIBIOS_DEVICE_NOT_FOUND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) ret = mtk_pcie_hw_rd_cfg(port, bn, devfn, where, size, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) *val = ~0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) static int mtk_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) int where, int size, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) struct mtk_pcie_port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) u32 bn = bus->number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) port = mtk_pcie_find_port(bus, devfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) if (!port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) return PCIBIOS_DEVICE_NOT_FOUND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) return mtk_pcie_hw_wr_cfg(port, bn, devfn, where, size, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389)
/*
 * Config-space accessors for v2 hosts: both directions go through the
 * TLP-generation registers (PCIE_CFG_HEADER*/PCIE_APP_TLP_REQ) rather
 * than a memory-mapped config window.
 */
static struct pci_ops mtk_pcie_ops_v2 = {
	.read = mtk_pcie_config_read,
	.write = mtk_pcie_config_write,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) phys_addr_t addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) /* MT2712/MT7622 only support 32-bit MSI addresses */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) addr = virt_to_phys(port->base + PCIE_MSI_VECTOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) msg->address_hi = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) msg->address_lo = lower_32_bits(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) msg->data = data->hwirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) dev_dbg(port->pcie->dev, "msi#%d address_hi %#x address_lo %#x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) (int)data->hwirq, msg->address_hi, msg->address_lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410)
/*
 * Per-vector MSI affinity is not supported by this driver; always fail
 * the request.
 */
static int mtk_msi_set_affinity(struct irq_data *irq_data,
				const struct cpumask *mask, bool force)
{
	return -EINVAL;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) static void mtk_msi_ack_irq(struct irq_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) u32 hwirq = data->hwirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) writel(1 << hwirq, port->base + PCIE_IMSI_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424)
/* Bottom-level (inner domain) irq_chip backing each MSI vector */
static struct irq_chip mtk_msi_bottom_irq_chip = {
	.name			= "MTK MSI",
	.irq_compose_msi_msg	= mtk_compose_msi_msg,
	.irq_set_affinity	= mtk_msi_set_affinity,
	.irq_ack		= mtk_msi_ack_irq,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) static int mtk_pcie_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) unsigned int nr_irqs, void *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) struct mtk_pcie_port *port = domain->host_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) unsigned long bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) WARN_ON(nr_irqs != 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) mutex_lock(&port->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) bit = find_first_zero_bit(port->msi_irq_in_use, MTK_MSI_IRQS_NUM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) if (bit >= MTK_MSI_IRQS_NUM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) mutex_unlock(&port->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) __set_bit(bit, port->msi_irq_in_use);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) mutex_unlock(&port->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) irq_domain_set_info(domain, virq, bit, &mtk_msi_bottom_irq_chip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) domain->host_data, handle_edge_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) static void mtk_pcie_irq_domain_free(struct irq_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) unsigned int virq, unsigned int nr_irqs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) struct irq_data *d = irq_domain_get_irq_data(domain, virq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) struct mtk_pcie_port *port = irq_data_get_irq_chip_data(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) mutex_lock(&port->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) if (!test_bit(d->hwirq, port->msi_irq_in_use))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) dev_err(port->pcie->dev, "trying to free unused MSI#%lu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) d->hwirq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) __clear_bit(d->hwirq, port->msi_irq_in_use);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) mutex_unlock(&port->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) irq_domain_free_irqs_parent(domain, virq, nr_irqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476)
/* Alloc/free ops for the inner MSI IRQ domain. */
static const struct irq_domain_ops msi_domain_ops = {
	.alloc = mtk_pcie_irq_domain_alloc,
	.free = mtk_pcie_irq_domain_free,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481)
/*
 * Top-level MSI irq_chip used by the PCI MSI domain; acks are forwarded
 * to the bottom chip (mtk_msi_ack_irq) via irq_chip_ack_parent.
 */
static struct irq_chip mtk_msi_irq_chip = {
	.name = "MTK PCIe MSI",
	.irq_ack = irq_chip_ack_parent,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488)
/*
 * MSI domain description: default ops/chip plus MSI-X support. Note
 * MSI_FLAG_MULTI_PCI_MSI is absent, so multi-message MSI is not offered.
 */
static struct msi_domain_info mtk_msi_domain_info = {
	.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_PCI_MSIX),
	.chip = &mtk_msi_irq_chip,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) static int mtk_pcie_allocate_msi_domains(struct mtk_pcie_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) struct fwnode_handle *fwnode = of_node_to_fwnode(port->pcie->dev->of_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) mutex_init(&port->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) port->inner_domain = irq_domain_create_linear(fwnode, MTK_MSI_IRQS_NUM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) &msi_domain_ops, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) if (!port->inner_domain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) dev_err(port->pcie->dev, "failed to create IRQ domain\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) port->msi_domain = pci_msi_create_irq_domain(fwnode, &mtk_msi_domain_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) port->inner_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) if (!port->msi_domain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) dev_err(port->pcie->dev, "failed to create MSI domain\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) irq_domain_remove(port->inner_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) static void mtk_pcie_enable_msi(struct mtk_pcie_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) phys_addr_t msg_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) msg_addr = virt_to_phys(port->base + PCIE_MSI_VECTOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) val = lower_32_bits(msg_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) writel(val, port->base + PCIE_IMSI_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) val = readl(port->base + PCIE_INT_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) val &= ~MSI_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) writel(val, port->base + PCIE_INT_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532)
/*
 * Tear down per-port interrupt state for every port on @pcie: detach the
 * chained handler first (so no demux runs while domains go away), remove
 * the INTx domain and, when MSI support is built in, the MSI domain pair,
 * then release the Linux mapping of the port interrupt.
 */
static void mtk_pcie_irq_teardown(struct mtk_pcie *pcie)
{
	struct mtk_pcie_port *port, *tmp;

	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
		irq_set_chained_handler_and_data(port->irq, NULL, NULL);

		if (port->irq_domain)
			irq_domain_remove(port->irq_domain);

		if (IS_ENABLED(CONFIG_PCI_MSI)) {
			/* Remove the stacked domain before its parent. */
			if (port->msi_domain)
				irq_domain_remove(port->msi_domain);
			if (port->inner_domain)
				irq_domain_remove(port->inner_domain);
		}

		irq_dispose_mapping(port->irq);
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) irq_hw_number_t hwirq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) irq_set_chip_data(irq, domain->host_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562)
/* INTx domain only needs a .map hook; hwirq numbering is fixed. */
static const struct irq_domain_ops intx_domain_ops = {
	.map = mtk_pcie_intx_map,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) struct device_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) struct device *dev = port->pcie->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) struct device_node *pcie_intc_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) /* Setup INTx */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) pcie_intc_node = of_get_next_child(node, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) if (!pcie_intc_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) dev_err(dev, "no PCIe Intc node found\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) port->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) &intx_domain_ops, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) of_node_put(pcie_intc_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) if (!port->irq_domain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) dev_err(dev, "failed to get INTx IRQ domain\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) if (IS_ENABLED(CONFIG_PCI_MSI)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) ret = mtk_pcie_allocate_msi_domains(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597)
/*
 * Chained handler for a port's interrupt line: demultiplexes INTx and
 * (when built in) MSI events out of the port's status registers and
 * forwards each to its mapped Linux IRQ. The read/clear ordering below
 * follows the hardware's write-one-to-clear semantics and must not be
 * rearranged.
 */
static void mtk_pcie_intr_handler(struct irq_desc *desc)
{
	struct mtk_pcie_port *port = irq_desc_get_handler_data(desc);
	struct irq_chip *irqchip = irq_desc_get_chip(desc);
	unsigned long status;
	u32 virq;
	u32 bit = INTX_SHIFT;	/* INTx bits start at INTX_SHIFT in PCIE_INT_STATUS */

	chained_irq_enter(irqchip, desc);

	status = readl(port->base + PCIE_INT_STATUS);
	if (status & INTX_MASK) {
		for_each_set_bit_from(bit, &status, PCI_NUM_INTX + INTX_SHIFT) {
			/* Clear the INTx */
			writel(1 << bit, port->base + PCIE_INT_STATUS);
			virq = irq_find_mapping(port->irq_domain,
						bit - INTX_SHIFT);
			generic_handle_irq(virq);
		}
	}

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		if (status & MSI_STATUS){
			unsigned long imsi_status;

			/*
			 * Re-read IMSI status until it drains so vectors
			 * raised while earlier ones were being handled are
			 * not lost; per-vector acks happen in
			 * mtk_msi_ack_irq() via the edge flow.
			 */
			while ((imsi_status = readl(port->base + PCIE_IMSI_STATUS))) {
				for_each_set_bit(bit, &imsi_status, MTK_MSI_IRQS_NUM) {
					virq = irq_find_mapping(port->inner_domain, bit);
					generic_handle_irq(virq);
				}
			}
			/* Clear MSI interrupt status */
			writel(MSI_STATUS, port->base + PCIE_INT_STATUS);
		}
	}

	chained_irq_exit(irqchip, desc);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) static int mtk_pcie_setup_irq(struct mtk_pcie_port *port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) struct device_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) struct mtk_pcie *pcie = port->pcie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) struct device *dev = pcie->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) struct platform_device *pdev = to_platform_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) err = mtk_pcie_init_irq_domain(port, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) dev_err(dev, "failed to init PCIe IRQ domain\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) port->irq = platform_get_irq(pdev, port->slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) if (port->irq < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) return port->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) irq_set_chained_handler_and_data(port->irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) mtk_pcie_intr_handler, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660)
/*
 * Bring up a port on the v2-generation IP (e.g. MT7622): optionally enable
 * LTSSM/ASPM in the shared subsys register, pulse the port resets, apply
 * SoC-specific vendor/class/device ID fixups, wait for link training to
 * complete, then unmask INTx/MSI and program the address translation
 * windows. The register write ordering follows the hardware bring-up
 * sequence and must not be rearranged.
 *
 * Returns 0 on success, -EINVAL if the bridge has no MEM window, or
 * -ETIMEDOUT when the link does not come up within 100ms.
 */
static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
{
	struct mtk_pcie *pcie = port->pcie;
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
	struct resource *mem = NULL;
	struct resource_entry *entry;
	const struct mtk_pcie_soc *soc = port->pcie->soc;
	u32 val;
	int err;

	/* The first MEM window backs the AHB-to-PCIe translation below. */
	entry = resource_list_first_type(&host->windows, IORESOURCE_MEM);
	if (entry)
		mem = entry->res;
	if (!mem)
		return -EINVAL;

	/* MT7622 platforms need to enable LTSSM and ASPM from PCIe subsys */
	if (pcie->base) {
		val = readl(pcie->base + PCIE_SYS_CFG_V2);
		val |= PCIE_CSR_LTSSM_EN(port->slot) |
		       PCIE_CSR_ASPM_L1_EN(port->slot);
		writel(val, pcie->base + PCIE_SYS_CFG_V2);
	}

	/* Assert all reset signals */
	writel(0, port->base + PCIE_RST_CTRL);

	/*
	 * Enable PCIe link down reset, if link status changed from link up to
	 * link down, this will reset MAC control registers and configuration
	 * space.
	 */
	writel(PCIE_LINKDOWN_RST_EN, port->base + PCIE_RST_CTRL);

	/* De-assert PHY, PE, PIPE, MAC and configuration reset	*/
	val = readl(port->base + PCIE_RST_CTRL);
	val |= PCIE_PHY_RSTB | PCIE_PERSTB | PCIE_PIPE_SRSTB |
	       PCIE_MAC_SRSTB | PCIE_CRSTB;
	writel(val, port->base + PCIE_RST_CTRL);

	/* Set up vendor ID and class code */
	if (soc->need_fix_class_id) {
		val = PCI_VENDOR_ID_MEDIATEK;
		writew(val, port->base + PCIE_CONF_VEND_ID);

		val = PCI_CLASS_BRIDGE_PCI;
		writew(val, port->base + PCIE_CONF_CLASS_ID);
	}

	if (soc->need_fix_device_id)
		writew(soc->device_id, port->base + PCIE_CONF_DEVICE_ID);

	/* 100ms timeout value should be enough for Gen1/2 training */
	err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_V2, val,
				 !!(val & PCIE_PORT_LINKUP_V2), 20,
				 100 * USEC_PER_MSEC);
	if (err)
		return -ETIMEDOUT;

	/* Set INTx mask */
	val = readl(port->base + PCIE_INT_MASK);
	val &= ~INTX_MASK;
	writel(val, port->base + PCIE_INT_MASK);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		mtk_pcie_enable_msi(port);

	/* Set AHB to PCIe translation windows */
	val = lower_32_bits(mem->start) |
	      AHB2PCIE_SIZE(fls(resource_size(mem)));
	writel(val, port->base + PCIE_AHB_TRANS_BASE0_L);

	val = upper_32_bits(mem->start);
	writel(val, port->base + PCIE_AHB_TRANS_BASE0_H);

	/* Set PCIe to AXI translation memory space.*/
	val = PCIE2AHB_SIZE | WIN_ENABLE;
	writel(val, port->base + PCIE_AXI_WINDOW0);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) unsigned int devfn, int where)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) struct mtk_pcie *pcie = bus->sysdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) writel(PCIE_CONF_ADDR(where, PCI_FUNC(devfn), PCI_SLOT(devfn),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) bus->number), pcie->base + PCIE_CFG_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) return pcie->base + PCIE_CFG_DATA + (where & 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753)
/* Config accessors built on the indirect CFG_ADDR/CFG_DATA pair above. */
static struct pci_ops mtk_pcie_ops = {
	.map_bus = mtk_pcie_map_bus,
	.read  = pci_generic_config_read,
	.write = pci_generic_config_write,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759)
/*
 * Bring up a port on the v1-generation IP: pulse PERST#, wait for link
 * training, enable the port interrupt, open the BAR0 DDR mapping, and
 * tune class code, FC credits and FTS count. Config-space accesses here
 * go through the indirect CFG_ADDR/CFG_DATA pair, so each data read or
 * write must be immediately preceded by an address-register write --
 * do not reorder these register accesses.
 *
 * Returns 0 on success or -ETIMEDOUT when the link does not come up
 * within 100ms.
 */
static int mtk_pcie_startup_port(struct mtk_pcie_port *port)
{
	struct mtk_pcie *pcie = port->pcie;
	u32 func = PCI_FUNC(port->slot << 3);
	u32 slot = PCI_SLOT(port->slot << 3);
	u32 val;
	int err;

	/* assert port PERST_N */
	val = readl(pcie->base + PCIE_SYS_CFG);
	val |= PCIE_PORT_PERST(port->slot);
	writel(val, pcie->base + PCIE_SYS_CFG);

	/* de-assert port PERST_N */
	val = readl(pcie->base + PCIE_SYS_CFG);
	val &= ~PCIE_PORT_PERST(port->slot);
	writel(val, pcie->base + PCIE_SYS_CFG);

	/* 100ms timeout value should be enough for Gen1/2 training */
	err = readl_poll_timeout(port->base + PCIE_LINK_STATUS, val,
				 !!(val & PCIE_PORT_LINKUP), 20,
				 100 * USEC_PER_MSEC);
	if (err)
		return -ETIMEDOUT;

	/* enable interrupt */
	val = readl(pcie->base + PCIE_INT_ENABLE);
	val |= PCIE_PORT_INT_EN(port->slot);
	writel(val, pcie->base + PCIE_INT_ENABLE);

	/* map to all DDR region. We need to set it before cfg operation. */
	writel(PCIE_BAR_MAP_MAX | PCIE_BAR_ENABLE,
	       port->base + PCIE_BAR0_SETUP);

	/* configure class code and revision ID */
	writel(PCIE_CLASS_CODE | PCIE_REVISION_ID, port->base + PCIE_CLASS);

	/* configure FC credit */
	writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0),
	       pcie->base + PCIE_CFG_ADDR);
	val = readl(pcie->base + PCIE_CFG_DATA);
	val &= ~PCIE_FC_CREDIT_MASK;
	val |= PCIE_FC_CREDIT_VAL(0x806c);
	/* re-latch the address before the data write-back */
	writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0),
	       pcie->base + PCIE_CFG_ADDR);
	writel(val, pcie->base + PCIE_CFG_DATA);

	/* configure RC FTS number to 250 when it leaves L0s */
	writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0),
	       pcie->base + PCIE_CFG_ADDR);
	val = readl(pcie->base + PCIE_CFG_DATA);
	val &= ~PCIE_FTS_NUM_MASK;
	val |= PCIE_FTS_NUM_L0(0x50);
	/* re-latch the address before the data write-back */
	writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0),
	       pcie->base + PCIE_CFG_ADDR);
	writel(val, pcie->base + PCIE_CFG_DATA);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
/*
 * Power up and start one port: enable its six clocks in order, pulse the
 * reset line, initialize and power on the PHY, then run the SoC-specific
 * startup hook. On any failure (or if the link stays down) the goto
 * ladder unwinds everything acquired so far in reverse order and the
 * port is freed via mtk_pcie_port_free(); errors are logged, not
 * returned -- the function is void and a dead port is simply dropped.
 */
static void mtk_pcie_enable_port(struct mtk_pcie_port *port)
{
	struct mtk_pcie *pcie = port->pcie;
	struct device *dev = pcie->dev;
	int err;

	err = clk_prepare_enable(port->sys_ck);
	if (err) {
		dev_err(dev, "failed to enable sys_ck%d clock\n", port->slot);
		goto err_sys_clk;
	}

	err = clk_prepare_enable(port->ahb_ck);
	if (err) {
		dev_err(dev, "failed to enable ahb_ck%d\n", port->slot);
		goto err_ahb_clk;
	}

	err = clk_prepare_enable(port->aux_ck);
	if (err) {
		dev_err(dev, "failed to enable aux_ck%d\n", port->slot);
		goto err_aux_clk;
	}

	err = clk_prepare_enable(port->axi_ck);
	if (err) {
		dev_err(dev, "failed to enable axi_ck%d\n", port->slot);
		goto err_axi_clk;
	}

	err = clk_prepare_enable(port->obff_ck);
	if (err) {
		dev_err(dev, "failed to enable obff_ck%d\n", port->slot);
		goto err_obff_clk;
	}

	err = clk_prepare_enable(port->pipe_ck);
	if (err) {
		dev_err(dev, "failed to enable pipe_ck%d\n", port->slot);
		goto err_pipe_clk;
	}

	/* Pulse the port reset before touching the PHY. */
	reset_control_assert(port->reset);
	reset_control_deassert(port->reset);

	err = phy_init(port->phy);
	if (err) {
		dev_err(dev, "failed to initialize port%d phy\n", port->slot);
		goto err_phy_init;
	}

	err = phy_power_on(port->phy);
	if (err) {
		dev_err(dev, "failed to power on port%d phy\n", port->slot);
		goto err_phy_on;
	}

	/* SoC-specific bring-up; zero return means the link is up. */
	if (!pcie->soc->startup(port))
		return;

	dev_info(dev, "Port%d link down\n", port->slot);

	/* Unwind in reverse acquisition order; falls through each label. */
	phy_power_off(port->phy);
err_phy_on:
	phy_exit(port->phy);
err_phy_init:
	clk_disable_unprepare(port->pipe_ck);
err_pipe_clk:
	clk_disable_unprepare(port->obff_ck);
err_obff_clk:
	clk_disable_unprepare(port->axi_ck);
err_axi_clk:
	clk_disable_unprepare(port->aux_ck);
err_aux_clk:
	clk_disable_unprepare(port->ahb_ck);
err_ahb_clk:
	clk_disable_unprepare(port->sys_ck);
err_sys_clk:
	mtk_pcie_port_free(port);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) static int mtk_pcie_parse_port(struct mtk_pcie *pcie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) struct device_node *node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) int slot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) struct mtk_pcie_port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) struct device *dev = pcie->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) struct platform_device *pdev = to_platform_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) char name[10];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) if (!port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) snprintf(name, sizeof(name), "port%d", slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) port->base = devm_platform_ioremap_resource_byname(pdev, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) if (IS_ERR(port->base)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) dev_err(dev, "failed to map port%d base\n", slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) return PTR_ERR(port->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) snprintf(name, sizeof(name), "sys_ck%d", slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) port->sys_ck = devm_clk_get(dev, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) if (IS_ERR(port->sys_ck)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) dev_err(dev, "failed to get sys_ck%d clock\n", slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) return PTR_ERR(port->sys_ck);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) /* sys_ck might be divided into the following parts in some chips */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) snprintf(name, sizeof(name), "ahb_ck%d", slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) port->ahb_ck = devm_clk_get_optional(dev, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) if (IS_ERR(port->ahb_ck))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) return PTR_ERR(port->ahb_ck);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) snprintf(name, sizeof(name), "axi_ck%d", slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) port->axi_ck = devm_clk_get_optional(dev, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) if (IS_ERR(port->axi_ck))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) return PTR_ERR(port->axi_ck);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) snprintf(name, sizeof(name), "aux_ck%d", slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) port->aux_ck = devm_clk_get_optional(dev, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) if (IS_ERR(port->aux_ck))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) return PTR_ERR(port->aux_ck);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) snprintf(name, sizeof(name), "obff_ck%d", slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) port->obff_ck = devm_clk_get_optional(dev, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) if (IS_ERR(port->obff_ck))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) return PTR_ERR(port->obff_ck);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) snprintf(name, sizeof(name), "pipe_ck%d", slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) port->pipe_ck = devm_clk_get_optional(dev, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) if (IS_ERR(port->pipe_ck))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) return PTR_ERR(port->pipe_ck);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) snprintf(name, sizeof(name), "pcie-rst%d", slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) port->reset = devm_reset_control_get_optional_exclusive(dev, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) if (PTR_ERR(port->reset) == -EPROBE_DEFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) return PTR_ERR(port->reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) /* some platforms may use default PHY setting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) snprintf(name, sizeof(name), "pcie-phy%d", slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) port->phy = devm_phy_optional_get(dev, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) if (IS_ERR(port->phy))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) return PTR_ERR(port->phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) port->slot = slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) port->pcie = pcie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) if (pcie->soc->setup_irq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) err = pcie->soc->setup_irq(port, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) INIT_LIST_HEAD(&port->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) list_add_tail(&port->list, &pcie->ports);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) static int mtk_pcie_subsys_powerup(struct mtk_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) struct device *dev = pcie->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) struct platform_device *pdev = to_platform_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) struct resource *regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) /* get shared registers, which are optional */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "subsys");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) if (regs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) pcie->base = devm_ioremap_resource(dev, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) if (IS_ERR(pcie->base)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) dev_err(dev, "failed to map shared register\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) return PTR_ERR(pcie->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) pcie->free_ck = devm_clk_get(dev, "free_ck");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) if (IS_ERR(pcie->free_ck)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) if (PTR_ERR(pcie->free_ck) == -EPROBE_DEFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) return -EPROBE_DEFER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) pcie->free_ck = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) pm_runtime_enable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) pm_runtime_get_sync(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) /* enable top level clock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) err = clk_prepare_enable(pcie->free_ck);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) dev_err(dev, "failed to enable free_ck\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) goto err_free_ck;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) err_free_ck:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) pm_runtime_put_sync(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) pm_runtime_disable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) static int mtk_pcie_setup(struct mtk_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) struct device *dev = pcie->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) struct device_node *node = dev->of_node, *child;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) struct mtk_pcie_port *port, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) for_each_available_child_of_node(node, child) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) int slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) err = of_pci_get_devfn(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) dev_err(dev, "failed to parse devfn: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) goto error_put_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) slot = PCI_SLOT(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) err = mtk_pcie_parse_port(pcie, child, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) goto error_put_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) err = mtk_pcie_subsys_powerup(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) /* enable each port, and then check link status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) list_for_each_entry_safe(port, tmp, &pcie->ports, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) mtk_pcie_enable_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) /* power down PCIe subsys if slots are all empty (link down) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) if (list_empty(&pcie->ports))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) mtk_pcie_subsys_powerdown(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) error_put_node:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) of_node_put(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) static int mtk_pcie_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) struct mtk_pcie *pcie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) struct pci_host_bridge *host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) if (!host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) pcie = pci_host_bridge_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) pcie->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) pcie->soc = of_device_get_match_data(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) platform_set_drvdata(pdev, pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) INIT_LIST_HEAD(&pcie->ports);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) err = mtk_pcie_setup(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) host->ops = pcie->soc->ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) host->sysdata = pcie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) err = pci_host_probe(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) goto put_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) put_resources:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) if (!list_empty(&pcie->ports))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) mtk_pcie_put_resources(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) static void mtk_pcie_free_resources(struct mtk_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) struct list_head *windows = &host->windows;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) pci_free_resource_list(windows);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) static int mtk_pcie_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) struct mtk_pcie *pcie = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) pci_stop_root_bus(host->bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) pci_remove_root_bus(host->bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) mtk_pcie_free_resources(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) mtk_pcie_irq_teardown(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) mtk_pcie_put_resources(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) static int __maybe_unused mtk_pcie_suspend_noirq(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) struct mtk_pcie *pcie = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) struct mtk_pcie_port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) if (list_empty(&pcie->ports))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) list_for_each_entry(port, &pcie->ports, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) clk_disable_unprepare(port->pipe_ck);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) clk_disable_unprepare(port->obff_ck);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) clk_disable_unprepare(port->axi_ck);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) clk_disable_unprepare(port->aux_ck);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) clk_disable_unprepare(port->ahb_ck);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) clk_disable_unprepare(port->sys_ck);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) phy_power_off(port->phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) phy_exit(port->phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) clk_disable_unprepare(pcie->free_ck);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) static int __maybe_unused mtk_pcie_resume_noirq(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) struct mtk_pcie *pcie = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) struct mtk_pcie_port *port, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) if (list_empty(&pcie->ports))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) clk_prepare_enable(pcie->free_ck);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) list_for_each_entry_safe(port, tmp, &pcie->ports, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) mtk_pcie_enable_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) /* In case of EP was removed while system suspend. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) if (list_empty(&pcie->ports))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) clk_disable_unprepare(pcie->free_ck);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) static const struct dev_pm_ops mtk_pcie_pm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) mtk_pcie_resume_noirq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) static const struct mtk_pcie_soc mtk_pcie_soc_v1 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) .ops = &mtk_pcie_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) .startup = mtk_pcie_startup_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) static const struct mtk_pcie_soc mtk_pcie_soc_mt2712 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) .ops = &mtk_pcie_ops_v2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) .startup = mtk_pcie_startup_port_v2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) .setup_irq = mtk_pcie_setup_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) static const struct mtk_pcie_soc mtk_pcie_soc_mt7622 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) .need_fix_class_id = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) .ops = &mtk_pcie_ops_v2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) .startup = mtk_pcie_startup_port_v2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) .setup_irq = mtk_pcie_setup_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) static const struct mtk_pcie_soc mtk_pcie_soc_mt7629 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) .need_fix_class_id = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) .need_fix_device_id = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) .device_id = PCI_DEVICE_ID_MEDIATEK_7629,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) .ops = &mtk_pcie_ops_v2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) .startup = mtk_pcie_startup_port_v2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) .setup_irq = mtk_pcie_setup_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) static const struct of_device_id mtk_pcie_ids[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) { .compatible = "mediatek,mt2701-pcie", .data = &mtk_pcie_soc_v1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) { .compatible = "mediatek,mt7623-pcie", .data = &mtk_pcie_soc_v1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) { .compatible = "mediatek,mt2712-pcie", .data = &mtk_pcie_soc_mt2712 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) { .compatible = "mediatek,mt7622-pcie", .data = &mtk_pcie_soc_mt7622 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) { .compatible = "mediatek,mt7629-pcie", .data = &mtk_pcie_soc_mt7629 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) {},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) static struct platform_driver mtk_pcie_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) .probe = mtk_pcie_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) .remove = mtk_pcie_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) .name = "mtk-pcie",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) .of_match_table = mtk_pcie_ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) .suppress_bind_attrs = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) .pm = &mtk_pcie_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) module_platform_driver(mtk_pcie_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) MODULE_LICENSE("GPL v2");