^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * phy-zynqmp.c - PHY driver for Xilinx ZynqMP GT.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (C) 2018-2020 Xilinx Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Author: Anurag Kumar Vulisha <anuragku@xilinx.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * Author: Subbaraya Sundeep <sundeep.lkml@gmail.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * This driver is tested for USB, SATA and Display Port currently.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) * Other controllers PCIe and SGMII should also work but that is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) * experimental as of now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/clk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/of.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/phy/phy.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <dt-bindings/phy/phy.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) * Lane Registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) /* TX De-emphasis parameters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #define L0_TX_ANA_TM_18 0x0048
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #define L0_TX_ANA_TM_118 0x01d8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #define L0_TX_ANA_TM_118_FORCE_17_0 BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) /* DN Resistor calibration code parameters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #define L0_TXPMA_ST_3 0x0b0c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #define L0_DN_CALIB_CODE 0x3f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) /* PMA control parameters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #define L0_TXPMD_TM_45 0x0cb4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) #define L0_TXPMD_TM_48 0x0cc0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) #define L0_TXPMD_TM_45_OVER_DP_MAIN BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) #define L0_TXPMD_TM_45_ENABLE_DP_MAIN BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) #define L0_TXPMD_TM_45_OVER_DP_POST1 BIT(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) #define L0_TXPMD_TM_45_ENABLE_DP_POST1 BIT(3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) #define L0_TXPMD_TM_45_OVER_DP_POST2 BIT(4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) #define L0_TXPMD_TM_45_ENABLE_DP_POST2 BIT(5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) /* PCS control parameters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) #define L0_TM_DIG_6 0x106c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) #define L0_TM_DIS_DESCRAMBLE_DECODER 0x0f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) #define L0_TX_DIG_61 0x00f4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) #define L0_TM_DISABLE_SCRAMBLE_ENCODER 0x0f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) /* PLL Test Mode register parameters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) #define L0_TM_PLL_DIG_37 0x2094
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) #define L0_TM_COARSE_CODE_LIMIT 0x10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) /* PLL SSC step size offsets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) #define L0_PLL_SS_STEPS_0_LSB 0x2368
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) #define L0_PLL_SS_STEPS_1_MSB 0x236c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) #define L0_PLL_SS_STEP_SIZE_0_LSB 0x2370
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) #define L0_PLL_SS_STEP_SIZE_1 0x2374
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) #define L0_PLL_SS_STEP_SIZE_2 0x2378
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) #define L0_PLL_SS_STEP_SIZE_3_MSB 0x237c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) #define L0_PLL_STATUS_READ_1 0x23e4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) /* SSC step size parameters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) #define STEP_SIZE_0_MASK 0xff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) #define STEP_SIZE_1_MASK 0xff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) #define STEP_SIZE_2_MASK 0xff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) #define STEP_SIZE_3_MASK 0x3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) #define STEP_SIZE_SHIFT 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) #define FORCE_STEP_SIZE 0x10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) #define FORCE_STEPS 0x20
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) #define STEPS_0_MASK 0xff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) #define STEPS_1_MASK 0x07
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) /* Reference clock selection parameters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) #define L0_Ln_REF_CLK_SEL(n) (0x2860 + (n) * 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) #define L0_REF_CLK_SEL_MASK 0x8f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) /* Calibration digital logic parameters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) #define L3_TM_CALIB_DIG19 0xec4c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) #define L3_CALIB_DONE_STATUS 0xef14
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) #define L3_TM_CALIB_DIG18 0xec48
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) #define L3_TM_CALIB_DIG19_NSW 0x07
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) #define L3_TM_CALIB_DIG18_NSW 0xe0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) #define L3_TM_OVERRIDE_NSW_CODE 0x20
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) #define L3_CALIB_DONE 0x02
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) #define L3_NSW_SHIFT 5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) #define L3_NSW_PIPE_SHIFT 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) #define L3_NSW_CALIB_SHIFT 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) #define PHY_REG_OFFSET 0x4000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) * Global Registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) /* Refclk selection parameters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) #define PLL_REF_SEL(n) (0x10000 + (n) * 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) #define PLL_FREQ_MASK 0x1f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) #define PLL_STATUS_LOCKED 0x10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) /* Inter Connect Matrix parameters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) #define ICM_CFG0 0x10010
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) #define ICM_CFG1 0x10014
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) #define ICM_CFG0_L0_MASK 0x07
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) #define ICM_CFG0_L1_MASK 0x70
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) #define ICM_CFG1_L2_MASK 0x07
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) #define ICM_CFG2_L3_MASK 0x70
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) #define ICM_CFG_SHIFT 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) /* Inter Connect Matrix allowed protocols */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) #define ICM_PROTOCOL_PD 0x0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) #define ICM_PROTOCOL_PCIE 0x1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) #define ICM_PROTOCOL_SATA 0x2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) #define ICM_PROTOCOL_USB 0x3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) #define ICM_PROTOCOL_DP 0x4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) #define ICM_PROTOCOL_SGMII 0x5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) /* Test Mode common reset control parameters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) #define TM_CMN_RST 0x10018
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) #define TM_CMN_RST_EN 0x1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) #define TM_CMN_RST_SET 0x2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) #define TM_CMN_RST_MASK 0x3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) /* Bus width parameters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) #define TX_PROT_BUS_WIDTH 0x10040
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) #define RX_PROT_BUS_WIDTH 0x10044
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) #define PROT_BUS_WIDTH_10 0x0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) #define PROT_BUS_WIDTH_20 0x1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) #define PROT_BUS_WIDTH_40 0x2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) #define PROT_BUS_WIDTH_SHIFT(n) ((n) * 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) #define PROT_BUS_WIDTH_MASK(n) GENMASK((n) * 2 + 1, (n) * 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) /* Number of GT lanes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) #define NUM_LANES 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) /* SIOU SATA control register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) #define SATA_CONTROL_OFFSET 0x0100
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) /* Total number of controllers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) #define CONTROLLERS_PER_LANE 5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) /* Protocol Type parameters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) #define XPSGTR_TYPE_USB0 0 /* USB controller 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) #define XPSGTR_TYPE_USB1 1 /* USB controller 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) #define XPSGTR_TYPE_SATA_0 2 /* SATA controller lane 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) #define XPSGTR_TYPE_SATA_1 3 /* SATA controller lane 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) #define XPSGTR_TYPE_PCIE_0 4 /* PCIe controller lane 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) #define XPSGTR_TYPE_PCIE_1 5 /* PCIe controller lane 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) #define XPSGTR_TYPE_PCIE_2 6 /* PCIe controller lane 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) #define XPSGTR_TYPE_PCIE_3 7 /* PCIe controller lane 3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) #define XPSGTR_TYPE_DP_0 8 /* Display Port controller lane 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) #define XPSGTR_TYPE_DP_1 9 /* Display Port controller lane 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) #define XPSGTR_TYPE_SGMII0 10 /* Ethernet SGMII controller 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) #define XPSGTR_TYPE_SGMII1 11 /* Ethernet SGMII controller 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) #define XPSGTR_TYPE_SGMII2 12 /* Ethernet SGMII controller 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) #define XPSGTR_TYPE_SGMII3 13 /* Ethernet SGMII controller 3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) /* Timeout values */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) #define TIMEOUT_US 1000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) struct xpsgtr_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169)
/**
 * struct xpsgtr_ssc - structure to hold SSC settings for a lane
 * @refclk_rate: PLL reference clock frequency
 * @pll_ref_clk: value to be written to register for corresponding ref clk rate
 * @steps: number of steps of SSC (Spread Spectrum Clock)
 * @step_size: step size of each step
 *
 * One entry exists in the ssc_lookup table below for each supported
 * reference clock rate.
 */
struct xpsgtr_ssc {
	u32 refclk_rate;
	u8  pll_ref_clk;
	u32 steps;
	u32 step_size;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183)
/**
 * struct xpsgtr_phy - representation of a lane
 * @phy: pointer to the kernel PHY device
 * @type: controller which uses this lane
 * @lane: lane number (0 to NUM_LANES - 1)
 * @protocol: protocol in which the lane operates (an ICM_PROTOCOL_* value,
 *            programmed into the ICM_CFG0/ICM_CFG1 registers)
 * @skip_phy_init: skip phy_init() if true
 * @dev: pointer to the xpsgtr_dev instance
 * @refclk: reference clock index (used to index xpsgtr_dev.refclk_sscs)
 */
struct xpsgtr_phy {
	struct phy *phy;
	u8 type;
	u8 lane;
	u8 protocol;
	bool skip_phy_init;
	struct xpsgtr_dev *dev;
	unsigned int refclk;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203)
/**
 * struct xpsgtr_dev - representation of a ZynqMP GT device
 * @dev: pointer to device
 * @serdes: serdes base address
 * @siou: siou base address
 * @gtr_mutex: mutex for locking
 * @phys: PHY lanes
 * @refclk_sscs: spread spectrum settings for the reference clocks,
 *               indexed by reference clock (xpsgtr_phy.refclk)
 * @tx_term_fix: fix for GT issue
 * @saved_icm_cfg0: stored value of ICM CFG0 register
 * @saved_icm_cfg1: stored value of ICM CFG1 register
 */
struct xpsgtr_dev {
	struct device *dev;
	void __iomem *serdes;
	void __iomem *siou;
	struct mutex gtr_mutex; /* mutex for locking */
	struct xpsgtr_phy phys[NUM_LANES];
	const struct xpsgtr_ssc *refclk_sscs[NUM_LANES];
	bool tx_term_fix;
	unsigned int saved_icm_cfg0;
	unsigned int saved_icm_cfg1;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) * Configuration Data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231)
/* lookup table to hold all settings needed for a ref clock frequency */
static const struct xpsgtr_ssc ssc_lookup[] = {
	/* refclk_rate, pll_ref_clk, steps, step_size */
	{  19200000, 0x05,  608, 264020 },
	{  20000000, 0x06,  634, 243454 },
	{  24000000, 0x07,  760, 168973 },
	{  26000000, 0x08,  824, 143860 },
	{  27000000, 0x09,  856,  86551 },
	{  38400000, 0x0a, 1218,  65896 },
	{  40000000, 0x0b,  634, 243454 },
	{  52000000, 0x0c,  824, 143860 },
	{ 100000000, 0x0d, 1058,  87533 },
	{ 108000000, 0x0e,  856,  86551 },
	{ 125000000, 0x0f,  992, 119497 },
	{ 135000000, 0x10, 1070,  55393 },
	{ 150000000, 0x11,  792, 187091 }
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) * I/O Accessors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) static inline u32 xpsgtr_read(struct xpsgtr_dev *gtr_dev, u32 reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) return readl(gtr_dev->serdes + reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) static inline void xpsgtr_write(struct xpsgtr_dev *gtr_dev, u32 reg, u32 value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) writel(value, gtr_dev->serdes + reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) static inline void xpsgtr_clr_set(struct xpsgtr_dev *gtr_dev, u32 reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) u32 clr, u32 set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) u32 value = xpsgtr_read(gtr_dev, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) value &= ~clr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) value |= set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) xpsgtr_write(gtr_dev, reg, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) static inline u32 xpsgtr_read_phy(struct xpsgtr_phy *gtr_phy, u32 reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) void __iomem *addr = gtr_phy->dev->serdes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) + gtr_phy->lane * PHY_REG_OFFSET + reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) return readl(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) static inline void xpsgtr_write_phy(struct xpsgtr_phy *gtr_phy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) u32 reg, u32 value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) void __iomem *addr = gtr_phy->dev->serdes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) + gtr_phy->lane * PHY_REG_OFFSET + reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) writel(value, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) static inline void xpsgtr_clr_set_phy(struct xpsgtr_phy *gtr_phy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) u32 reg, u32 clr, u32 set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) void __iomem *addr = gtr_phy->dev->serdes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) + gtr_phy->lane * PHY_REG_OFFSET + reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) writel((readl(addr) & ~clr) | set, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) * Hardware Configuration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) /* Wait for the PLL to lock (with a timeout). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) static int xpsgtr_wait_pll_lock(struct phy *phy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) unsigned int timeout = TIMEOUT_US;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) dev_dbg(gtr_dev->dev, "Waiting for PLL lock\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) u32 reg = xpsgtr_read_phy(gtr_phy, L0_PLL_STATUS_READ_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) if ((reg & PLL_STATUS_LOCKED) == PLL_STATUS_LOCKED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) if (--timeout == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) ret = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) if (ret == -ETIMEDOUT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) dev_err(gtr_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) "lane %u (type %u, protocol %u): PLL lock timeout\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) gtr_phy->lane, gtr_phy->type, gtr_phy->protocol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) /* Configure PLL and spread-sprectrum clock. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) static void xpsgtr_configure_pll(struct xpsgtr_phy *gtr_phy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) const struct xpsgtr_ssc *ssc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) u32 step_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) ssc = gtr_phy->dev->refclk_sscs[gtr_phy->refclk];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) step_size = ssc->step_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) xpsgtr_clr_set(gtr_phy->dev, PLL_REF_SEL(gtr_phy->lane),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) PLL_FREQ_MASK, ssc->pll_ref_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) /* Enable lane clock sharing, if required */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) if (gtr_phy->refclk != gtr_phy->lane) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) /* Lane3 Ref Clock Selection Register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) xpsgtr_clr_set(gtr_phy->dev, L0_Ln_REF_CLK_SEL(gtr_phy->lane),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) L0_REF_CLK_SEL_MASK, 1 << gtr_phy->refclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) /* SSC step size [7:0] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEP_SIZE_0_LSB,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) STEP_SIZE_0_MASK, step_size & STEP_SIZE_0_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) /* SSC step size [15:8] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) step_size >>= STEP_SIZE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEP_SIZE_1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) STEP_SIZE_1_MASK, step_size & STEP_SIZE_1_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) /* SSC step size [23:16] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) step_size >>= STEP_SIZE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEP_SIZE_2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) STEP_SIZE_2_MASK, step_size & STEP_SIZE_2_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) /* SSC steps [7:0] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEPS_0_LSB,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) STEPS_0_MASK, ssc->steps & STEPS_0_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) /* SSC steps [10:8] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEPS_1_MSB,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) STEPS_1_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) (ssc->steps >> STEP_SIZE_SHIFT) & STEPS_1_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) /* SSC step size [24:25] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) step_size >>= STEP_SIZE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEP_SIZE_3_MSB,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) STEP_SIZE_3_MASK, (step_size & STEP_SIZE_3_MASK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) FORCE_STEP_SIZE | FORCE_STEPS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) /* Configure the lane protocol. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) static void xpsgtr_lane_set_protocol(struct xpsgtr_phy *gtr_phy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) u8 protocol = gtr_phy->protocol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) switch (gtr_phy->lane) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) xpsgtr_clr_set(gtr_dev, ICM_CFG0, ICM_CFG0_L0_MASK, protocol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) xpsgtr_clr_set(gtr_dev, ICM_CFG0, ICM_CFG0_L1_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) protocol << ICM_CFG_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) xpsgtr_clr_set(gtr_dev, ICM_CFG1, ICM_CFG0_L0_MASK, protocol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) xpsgtr_clr_set(gtr_dev, ICM_CFG1, ICM_CFG0_L1_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) protocol << ICM_CFG_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) /* We already checked 0 <= lane <= 3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412)
/* Bypass (de)scrambler and 8b/10b decoder and encoder. */
static void xpsgtr_bypass_scrambler_8b10b(struct xpsgtr_phy *gtr_phy)
{
	/* RX path: disable the descrambler and 8b/10b decoder. */
	xpsgtr_write_phy(gtr_phy, L0_TM_DIG_6, L0_TM_DIS_DESCRAMBLE_DECODER);
	/* TX path: disable the scrambler and 8b/10b encoder. */
	xpsgtr_write_phy(gtr_phy, L0_TX_DIG_61, L0_TM_DISABLE_SCRAMBLE_ENCODER);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) /* DP-specific initialization. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) static void xpsgtr_phy_init_dp(struct xpsgtr_phy *gtr_phy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) xpsgtr_write_phy(gtr_phy, L0_TXPMD_TM_45,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) L0_TXPMD_TM_45_OVER_DP_MAIN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) L0_TXPMD_TM_45_ENABLE_DP_MAIN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) L0_TXPMD_TM_45_OVER_DP_POST1 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) L0_TXPMD_TM_45_OVER_DP_POST2 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) L0_TXPMD_TM_45_ENABLE_DP_POST2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) xpsgtr_write_phy(gtr_phy, L0_TX_ANA_TM_118,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) L0_TX_ANA_TM_118_FORCE_17_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) /* SATA-specific initialization. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) static void xpsgtr_phy_init_sata(struct xpsgtr_phy *gtr_phy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) xpsgtr_bypass_scrambler_8b10b(gtr_phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) writel(gtr_phy->lane, gtr_dev->siou + SATA_CONTROL_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) /* SGMII-specific initialization. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) static void xpsgtr_phy_init_sgmii(struct xpsgtr_phy *gtr_phy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) u32 mask = PROT_BUS_WIDTH_MASK(gtr_phy->lane);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) u32 val = PROT_BUS_WIDTH_10 << PROT_BUS_WIDTH_SHIFT(gtr_phy->lane);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) /* Set SGMII protocol TX and RX bus width to 10 bits. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) xpsgtr_clr_set(gtr_dev, TX_PROT_BUS_WIDTH, mask, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) xpsgtr_clr_set(gtr_dev, RX_PROT_BUS_WIDTH, mask, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) xpsgtr_bypass_scrambler_8b10b(gtr_phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) /* Configure TX de-emphasis and margining for DP. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) static void xpsgtr_phy_configure_dp(struct xpsgtr_phy *gtr_phy, unsigned int pre,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) unsigned int voltage)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) static const u8 voltage_swing[4][4] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) { 0x2a, 0x27, 0x24, 0x20 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) { 0x27, 0x23, 0x20, 0xff },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) { 0x24, 0x20, 0xff, 0xff },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) { 0xff, 0xff, 0xff, 0xff }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) static const u8 pre_emphasis[4][4] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) { 0x02, 0x02, 0x02, 0x02 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) { 0x01, 0x01, 0x01, 0xff },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) { 0x00, 0x00, 0xff, 0xff },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) { 0xff, 0xff, 0xff, 0xff }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) xpsgtr_write_phy(gtr_phy, L0_TXPMD_TM_48, voltage_swing[pre][voltage]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) xpsgtr_write_phy(gtr_phy, L0_TX_ANA_TM_18, pre_emphasis[pre][voltage]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) * PHY Operations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) static bool xpsgtr_phy_init_required(struct xpsgtr_phy *gtr_phy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) * As USB may save the snapshot of the states during hibernation, doing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) * phy_init() will put the USB controller into reset, resulting in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) * losing of the saved snapshot. So try to avoid phy_init() for USB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) * except when gtr_phy->skip_phy_init is false (this happens when FPD is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) * shutdown during suspend or when gt lane is changed from current one)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) if (gtr_phy->protocol == ICM_PROTOCOL_USB && gtr_phy->skip_phy_init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) * There is a functional issue in the GT. The TX termination resistance can be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) * out of spec due to a issue in the calibration logic. This is the workaround
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) * to fix it, required for XCZU9EG silicon.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) static int xpsgtr_phy_tx_term_fix(struct xpsgtr_phy *gtr_phy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) u32 timeout = TIMEOUT_US;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) u32 nsw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) /* Enabling Test Mode control for CMN Rest */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) xpsgtr_clr_set(gtr_dev, TM_CMN_RST, TM_CMN_RST_MASK, TM_CMN_RST_SET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) /* Set Test Mode reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) xpsgtr_clr_set(gtr_dev, TM_CMN_RST, TM_CMN_RST_MASK, TM_CMN_RST_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) xpsgtr_write(gtr_dev, L3_TM_CALIB_DIG18, 0x00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) xpsgtr_write(gtr_dev, L3_TM_CALIB_DIG19, L3_TM_OVERRIDE_NSW_CODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) * As a part of work around sequence for PMOS calibration fix,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) * we need to configure any lane ICM_CFG to valid protocol. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) * will deassert the CMN_Resetn signal.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) xpsgtr_lane_set_protocol(gtr_phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) /* Clear Test Mode reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) xpsgtr_clr_set(gtr_dev, TM_CMN_RST, TM_CMN_RST_MASK, TM_CMN_RST_SET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) dev_dbg(gtr_dev->dev, "calibrating...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) u32 reg = xpsgtr_read(gtr_dev, L3_CALIB_DONE_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) if ((reg & L3_CALIB_DONE) == L3_CALIB_DONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) if (!--timeout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) dev_err(gtr_dev->dev, "calibration time out\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) } while (timeout > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) dev_dbg(gtr_dev->dev, "calibration done\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) /* Reading NMOS Register Code */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) nsw = xpsgtr_read(gtr_dev, L0_TXPMA_ST_3) & L0_DN_CALIB_CODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) /* Set Test Mode reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) xpsgtr_clr_set(gtr_dev, TM_CMN_RST, TM_CMN_RST_MASK, TM_CMN_RST_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) /* Writing NMOS register values back [5:3] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) xpsgtr_write(gtr_dev, L3_TM_CALIB_DIG19, nsw >> L3_NSW_CALIB_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) /* Writing NMOS register value [2:0] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) xpsgtr_write(gtr_dev, L3_TM_CALIB_DIG18,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) ((nsw & L3_TM_CALIB_DIG19_NSW) << L3_NSW_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) (1 << L3_NSW_PIPE_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) /* Clear Test Mode reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) xpsgtr_clr_set(gtr_dev, TM_CMN_RST, TM_CMN_RST_MASK, TM_CMN_RST_SET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) static int xpsgtr_phy_init(struct phy *phy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) mutex_lock(>r_dev->gtr_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) /* Skip initialization if not required. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) if (!xpsgtr_phy_init_required(gtr_phy))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) if (gtr_dev->tx_term_fix) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) ret = xpsgtr_phy_tx_term_fix(gtr_phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) gtr_dev->tx_term_fix = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) /* Enable coarse code saturation limiting logic. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) xpsgtr_write_phy(gtr_phy, L0_TM_PLL_DIG_37, L0_TM_COARSE_CODE_LIMIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) * Configure the PLL, the lane protocol, and perform protocol-specific
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) * initialization.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) xpsgtr_configure_pll(gtr_phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) xpsgtr_lane_set_protocol(gtr_phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) switch (gtr_phy->protocol) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) case ICM_PROTOCOL_DP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) xpsgtr_phy_init_dp(gtr_phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) case ICM_PROTOCOL_SATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) xpsgtr_phy_init_sata(gtr_phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) case ICM_PROTOCOL_SGMII:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) xpsgtr_phy_init_sgmii(gtr_phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) mutex_unlock(>r_dev->gtr_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) static int xpsgtr_phy_exit(struct phy *phy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) gtr_phy->skip_phy_init = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) static int xpsgtr_phy_power_on(struct phy *phy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) * Wait for the PLL to lock. For DP, only wait on DP0 to avoid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) * cumulating waits for both lanes. The user is expected to initialize
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) * lane 0 last.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) if (gtr_phy->protocol != ICM_PROTOCOL_DP ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) gtr_phy->type == XPSGTR_TYPE_DP_0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) ret = xpsgtr_wait_pll_lock(phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) static int xpsgtr_phy_configure(struct phy *phy, union phy_configure_opts *opts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) if (gtr_phy->protocol != ICM_PROTOCOL_DP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) xpsgtr_phy_configure_dp(gtr_phy, opts->dp.pre[0], opts->dp.voltage[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) static const struct phy_ops xpsgtr_phyops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) .init = xpsgtr_phy_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) .exit = xpsgtr_phy_exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) .power_on = xpsgtr_phy_power_on,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) .configure = xpsgtr_phy_configure,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) * OF Xlate Support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) /* Set the lane type and protocol based on the PHY type and instance number. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) static int xpsgtr_set_lane_type(struct xpsgtr_phy *gtr_phy, u8 phy_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) unsigned int phy_instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) unsigned int num_phy_types;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) const int *phy_types;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) switch (phy_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) case PHY_TYPE_SATA: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) static const int types[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) XPSGTR_TYPE_SATA_0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) XPSGTR_TYPE_SATA_1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) phy_types = types;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) num_phy_types = ARRAY_SIZE(types);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) gtr_phy->protocol = ICM_PROTOCOL_SATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) case PHY_TYPE_USB3: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) static const int types[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) XPSGTR_TYPE_USB0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) XPSGTR_TYPE_USB1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) phy_types = types;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) num_phy_types = ARRAY_SIZE(types);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) gtr_phy->protocol = ICM_PROTOCOL_USB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) case PHY_TYPE_DP: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) static const int types[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) XPSGTR_TYPE_DP_0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) XPSGTR_TYPE_DP_1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) phy_types = types;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) num_phy_types = ARRAY_SIZE(types);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) gtr_phy->protocol = ICM_PROTOCOL_DP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) case PHY_TYPE_PCIE: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) static const int types[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) XPSGTR_TYPE_PCIE_0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) XPSGTR_TYPE_PCIE_1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) XPSGTR_TYPE_PCIE_2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) XPSGTR_TYPE_PCIE_3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) phy_types = types;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) num_phy_types = ARRAY_SIZE(types);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) gtr_phy->protocol = ICM_PROTOCOL_PCIE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) case PHY_TYPE_SGMII: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) static const int types[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) XPSGTR_TYPE_SGMII0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) XPSGTR_TYPE_SGMII1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) XPSGTR_TYPE_SGMII2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) XPSGTR_TYPE_SGMII3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) phy_types = types;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) num_phy_types = ARRAY_SIZE(types);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) gtr_phy->protocol = ICM_PROTOCOL_SGMII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) if (phy_instance >= num_phy_types)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) gtr_phy->type = phy_types[phy_instance];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) * Valid combinations of controllers and lanes (Interconnect Matrix).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) static const unsigned int icm_matrix[NUM_LANES][CONTROLLERS_PER_LANE] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) { XPSGTR_TYPE_PCIE_0, XPSGTR_TYPE_SATA_0, XPSGTR_TYPE_USB0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) XPSGTR_TYPE_DP_1, XPSGTR_TYPE_SGMII0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) { XPSGTR_TYPE_PCIE_1, XPSGTR_TYPE_SATA_1, XPSGTR_TYPE_USB0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) XPSGTR_TYPE_DP_0, XPSGTR_TYPE_SGMII1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) { XPSGTR_TYPE_PCIE_2, XPSGTR_TYPE_SATA_0, XPSGTR_TYPE_USB0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) XPSGTR_TYPE_DP_1, XPSGTR_TYPE_SGMII2 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) { XPSGTR_TYPE_PCIE_3, XPSGTR_TYPE_SATA_1, XPSGTR_TYPE_USB1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) XPSGTR_TYPE_DP_0, XPSGTR_TYPE_SGMII3 }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) /* Translate OF phandle and args to PHY instance. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) static struct phy *xpsgtr_xlate(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) struct of_phandle_args *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) struct xpsgtr_dev *gtr_dev = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) struct xpsgtr_phy *gtr_phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) unsigned int phy_instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) unsigned int phy_lane;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) unsigned int phy_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) unsigned int refclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) if (args->args_count != 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) dev_err(dev, "Invalid number of cells in 'phy' property\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) * Get the PHY parameters from the OF arguments and derive the lane
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) * type.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) phy_lane = args->args[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) if (phy_lane >= ARRAY_SIZE(gtr_dev->phys)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) dev_err(dev, "Invalid lane number %u\n", phy_lane);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) return ERR_PTR(-ENODEV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) gtr_phy = >r_dev->phys[phy_lane];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) phy_type = args->args[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) phy_instance = args->args[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) ret = xpsgtr_set_lane_type(gtr_phy, phy_type, phy_instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) dev_err(gtr_dev->dev, "Invalid PHY type and/or instance\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) refclk = args->args[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) if (refclk >= ARRAY_SIZE(gtr_dev->refclk_sscs) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) !gtr_dev->refclk_sscs[refclk]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) dev_err(dev, "Invalid reference clock number %u\n", refclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) gtr_phy->refclk = refclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) * Ensure that the Interconnect Matrix is obeyed, i.e a given lane type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) * is allowed to operate on the lane.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) for (i = 0; i < CONTROLLERS_PER_LANE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) if (icm_matrix[phy_lane][i] == gtr_phy->type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) return gtr_phy->phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) * Power Management
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) static int __maybe_unused xpsgtr_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) struct xpsgtr_dev *gtr_dev = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) /* Save the snapshot ICM_CFG registers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) gtr_dev->saved_icm_cfg0 = xpsgtr_read(gtr_dev, ICM_CFG0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) gtr_dev->saved_icm_cfg1 = xpsgtr_read(gtr_dev, ICM_CFG1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) static int __maybe_unused xpsgtr_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) struct xpsgtr_dev *gtr_dev = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) unsigned int icm_cfg0, icm_cfg1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) bool skip_phy_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) icm_cfg0 = xpsgtr_read(gtr_dev, ICM_CFG0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) icm_cfg1 = xpsgtr_read(gtr_dev, ICM_CFG1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) /* Return if no GT lanes got configured before suspend. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) if (!gtr_dev->saved_icm_cfg0 && !gtr_dev->saved_icm_cfg1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) /* Check if the ICM configurations changed after suspend. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) if (icm_cfg0 == gtr_dev->saved_icm_cfg0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) icm_cfg1 == gtr_dev->saved_icm_cfg1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) skip_phy_init = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) skip_phy_init = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) /* Update the skip_phy_init for all gtr_phy instances. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) for (i = 0; i < ARRAY_SIZE(gtr_dev->phys); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) gtr_dev->phys[i].skip_phy_init = skip_phy_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) static const struct dev_pm_ops xpsgtr_pm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) SET_SYSTEM_SLEEP_PM_OPS(xpsgtr_suspend, xpsgtr_resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) * Probe & Platform Driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) static int xpsgtr_get_ref_clocks(struct xpsgtr_dev *gtr_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) unsigned int refclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) for (refclk = 0; refclk < ARRAY_SIZE(gtr_dev->refclk_sscs); ++refclk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) unsigned long rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) struct clk *clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) char name[8];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) snprintf(name, sizeof(name), "ref%u", refclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) clk = devm_clk_get_optional(gtr_dev->dev, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) if (IS_ERR(clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) if (PTR_ERR(clk) != -EPROBE_DEFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) dev_err(gtr_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) "Failed to get reference clock %u: %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) refclk, PTR_ERR(clk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) return PTR_ERR(clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) if (!clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) * Get the spread spectrum (SSC) settings for the reference
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) * clock rate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) rate = clk_get_rate(clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) for (i = 0 ; i < ARRAY_SIZE(ssc_lookup); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) if (rate == ssc_lookup[i].refclk_rate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) gtr_dev->refclk_sscs[refclk] = &ssc_lookup[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) if (i == ARRAY_SIZE(ssc_lookup)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) dev_err(gtr_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) "Invalid rate %lu for reference clock %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) rate, refclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) static int xpsgtr_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) struct device_node *np = pdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) struct xpsgtr_dev *gtr_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) struct phy_provider *provider;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) unsigned int port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) gtr_dev = devm_kzalloc(&pdev->dev, sizeof(*gtr_dev), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) if (!gtr_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) gtr_dev->dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) platform_set_drvdata(pdev, gtr_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) mutex_init(>r_dev->gtr_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) if (of_device_is_compatible(np, "xlnx,zynqmp-psgtr"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) gtr_dev->tx_term_fix =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) of_property_read_bool(np, "xlnx,tx-termination-fix");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) /* Acquire resources. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) gtr_dev->serdes = devm_platform_ioremap_resource_byname(pdev, "serdes");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) if (IS_ERR(gtr_dev->serdes))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) return PTR_ERR(gtr_dev->serdes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) gtr_dev->siou = devm_platform_ioremap_resource_byname(pdev, "siou");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) if (IS_ERR(gtr_dev->siou))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) return PTR_ERR(gtr_dev->siou);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) ret = xpsgtr_get_ref_clocks(gtr_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) /* Create PHYs. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) for (port = 0; port < ARRAY_SIZE(gtr_dev->phys); ++port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) struct xpsgtr_phy *gtr_phy = >r_dev->phys[port];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) struct phy *phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) gtr_phy->lane = port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) gtr_phy->dev = gtr_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) phy = devm_phy_create(&pdev->dev, np, &xpsgtr_phyops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) if (IS_ERR(phy)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) dev_err(&pdev->dev, "failed to create PHY\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) return PTR_ERR(phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) gtr_phy->phy = phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) phy_set_drvdata(phy, gtr_phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) /* Register the PHY provider. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) provider = devm_of_phy_provider_register(&pdev->dev, xpsgtr_xlate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) if (IS_ERR(provider)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) dev_err(&pdev->dev, "registering provider failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) return PTR_ERR(provider);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) static const struct of_device_id xpsgtr_of_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) { .compatible = "xlnx,zynqmp-psgtr", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) { .compatible = "xlnx,zynqmp-psgtr-v1.1", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) {},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) MODULE_DEVICE_TABLE(of, xpsgtr_of_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) static struct platform_driver xpsgtr_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) .probe = xpsgtr_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) .name = "xilinx-psgtr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) .of_match_table = xpsgtr_of_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) .pm = &xpsgtr_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) module_platform_driver(xpsgtr_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) MODULE_AUTHOR("Xilinx Inc.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) MODULE_LICENSE("GPL v2");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) MODULE_DESCRIPTION("Xilinx ZynqMP High speed Gigabit Transceiver");