^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) // Copyright (c) 2017 Cadence
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) // Cadence PCIe controller driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) // Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include "pcie-cadence.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) u32 delay = 0x3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) u32 ltssm_control_cap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) * Set the LTSSM Detect Quiet state min. delay to 2ms.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) ltssm_control_cap = cdns_pcie_readl(pcie, CDNS_PCIE_LTSSM_CONTROL_CAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) ltssm_control_cap = ((ltssm_control_cap &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) ~CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) CDNS_PCIE_DETECT_QUIET_MIN_DELAY(delay));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) cdns_pcie_writel(pcie, CDNS_PCIE_LTSSM_CONTROL_CAP, ltssm_control_cap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) u32 r, bool is_io,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) u64 cpu_addr, u64 pci_addr, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) * roundup_pow_of_two() returns an unsigned long, which is not suited
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) * for 64bit values.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) u64 sz = 1ULL << fls64(size - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) int nbits = ilog2(sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) u32 addr0, addr1, desc0, desc1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) if (nbits < 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) nbits = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) /* Set the PCI address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) (lower_32_bits(pci_addr) & GENMASK(31, 8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) addr1 = upper_32_bits(pci_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), addr0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), addr1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) /* Set the PCIe header descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) if (is_io)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_IO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) desc1 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) * Whatever Bit [23] is set or not inside DESC0 register of the outbound
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) * PCIe descriptor, the PCI function number must be set into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) * Bits [26:24] of DESC0 anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) * In Root Complex mode, the function number is always 0 but in Endpoint
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) * mode, the PCIe controller may support more than one function. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) * function number needs to be set properly into the outbound PCIe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) * descriptor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) * Besides, setting Bit [23] is mandatory when in Root Complex mode:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) * then the driver must provide the bus, resp. device, number in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) * Bits [7:0] of DESC1, resp. Bits[31:27] of DESC0. Like the function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) * number, the device number is always 0 in Root Complex mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) * However when in Endpoint mode, we can clear Bit [23] of DESC0, hence
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) * the PCIe controller will use the captured values for the bus and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) * device numbers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) if (pcie->is_rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) /* The device and function numbers are always 0. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(busnr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) * Use captured values for bus and device numbers but still
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) * need to set the function number.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(fn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), desc0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) /* Set the CPU address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) if (pcie->ops->cpu_addr_fixup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) (lower_32_bits(cpu_addr) & GENMASK(31, 8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) addr1 = upper_32_bits(cpu_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) u8 busnr, u8 fn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) u32 r, u64 cpu_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) u32 addr0, addr1, desc0, desc1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) desc1 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) /* See cdns_pcie_set_outbound_region() comments above. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) if (pcie->is_rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(busnr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(fn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) /* Set the CPU address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) if (pcie->ops->cpu_addr_fixup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(17) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) (lower_32_bits(cpu_addr) & GENMASK(31, 8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) addr1 = upper_32_bits(cpu_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), desc0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) void cdns_pcie_disable_phy(struct cdns_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) int i = pcie->phy_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) while (i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) phy_power_off(pcie->phy[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) phy_exit(pcie->phy[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) int cdns_pcie_enable_phy(struct cdns_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) for (i = 0; i < pcie->phy_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) ret = phy_init(pcie->phy[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) goto err_phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) ret = phy_power_on(pcie->phy[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) phy_exit(pcie->phy[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) goto err_phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) err_phy:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) while (--i >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) phy_power_off(pcie->phy[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) phy_exit(pcie->phy[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) struct device_node *np = dev->of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) int phy_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) struct phy **phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) struct device_link **link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) const char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) phy_count = of_property_count_strings(np, "phy-names");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) if (phy_count < 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) dev_err(dev, "no phy-names. PHY will not be initialized\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) pcie->phy_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) phy = devm_kcalloc(dev, phy_count, sizeof(*phy), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) if (!phy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) link = devm_kcalloc(dev, phy_count, sizeof(*link), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) if (!link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) for (i = 0; i < phy_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) of_property_read_string_index(np, "phy-names", i, &name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) phy[i] = devm_phy_get(dev, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) if (IS_ERR(phy[i])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) ret = PTR_ERR(phy[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) goto err_phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) if (!link[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) devm_phy_put(dev, phy[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) goto err_phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) pcie->phy_count = phy_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) pcie->phy = phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) pcie->link = link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) ret = cdns_pcie_enable_phy(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) goto err_phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) err_phy:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) while (--i >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) device_link_del(link[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) devm_phy_put(dev, phy[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) #ifdef CONFIG_PM_SLEEP
static int cdns_pcie_suspend_noirq(struct device *dev)
{
	/* Only the PHYs need switching off for system sleep. */
	cdns_pcie_disable_phy(dev_get_drvdata(dev));

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255)
static int cdns_pcie_resume_noirq(struct device *dev)
{
	struct cdns_pcie *pcie = dev_get_drvdata(dev);
	int ret;

	/* Bring the PHYs back up; they were disabled on suspend. */
	ret = cdns_pcie_enable_phy(pcie);
	if (ret)
		dev_err(dev, "failed to enable phy\n");

	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270)
/* System sleep PM ops: PHYs are toggled in the noirq suspend/resume phase. */
const struct dev_pm_ops cdns_pcie_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cdns_pcie_suspend_noirq,
				      cdns_pcie_resume_noirq)
};