^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright (C) 2010 Google, Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/err.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/iopoll.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/clk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/of.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/of_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/pinctrl/consumer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/regulator/consumer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/reset.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/mmc/card.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/mmc/host.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/mmc/mmc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/mmc/slot-gpio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/gpio/consumer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <linux/ktime.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include "sdhci-pltfm.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include "cqhci.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) /* Tegra SDHOST controller vendor register definitions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #define SDHCI_TEGRA_VENDOR_CLOCK_CTRL 0x100
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #define SDHCI_CLOCK_CTRL_TAP_MASK 0x00ff0000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #define SDHCI_CLOCK_CTRL_TAP_SHIFT 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #define SDHCI_CLOCK_CTRL_TRIM_MASK 0x1f000000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #define SDHCI_CLOCK_CTRL_TRIM_SHIFT 24
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #define SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE BIT(5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #define SDHCI_CLOCK_CTRL_PADPIPE_CLKEN_OVERRIDE BIT(3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #define SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE BIT(2)
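/*
 * Note (derived from the masks and shifts above, not from the TRM): the TAP
 * value occupies bits [23:16] and the TRIM value bits [28:24] of
 * SDHCI_TEGRA_VENDOR_CLOCK_CTRL. An illustrative field update looks like:
 *
 *	reg &= ~SDHCI_CLOCK_CTRL_TAP_MASK;
 *	reg |= tap << SDHCI_CLOCK_CTRL_TAP_SHIFT;
 */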
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #define SDHCI_TEGRA_VENDOR_SYS_SW_CTRL 0x104
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #define SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE BIT(31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) #define SDHCI_TEGRA_VENDOR_CAP_OVERRIDES 0x10c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) #define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK 0x00003f00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) #define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) #define SDHCI_TEGRA_VENDOR_MISC_CTRL 0x120
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) #define SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) #define SDHCI_MISC_CTRL_ENABLE_SDR104 0x8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) #define SDHCI_MISC_CTRL_ENABLE_SDR50 0x10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) #define SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300 0x20
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) #define SDHCI_MISC_CTRL_ENABLE_DDR50 0x200
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) #define SDHCI_TEGRA_VENDOR_DLLCAL_CFG 0x1b0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) #define SDHCI_TEGRA_DLLCAL_CALIBRATE BIT(31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) #define SDHCI_TEGRA_VENDOR_DLLCAL_STA 0x1bc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) #define SDHCI_TEGRA_DLLCAL_STA_ACTIVE BIT(31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) #define SDHCI_VNDR_TUN_CTRL0_0 0x1c0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) #define SDHCI_VNDR_TUN_CTRL0_TUN_HW_TAP 0x20000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) #define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK 0x03fc0000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) #define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT 18
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) #define SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK 0x00001fc0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) #define SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT 6
#define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK		0x0000e000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) #define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT 13
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) #define TRIES_128 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) #define TRIES_256 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) #define SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK 0x7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) #define SDHCI_TEGRA_VNDR_TUN_CTRL1_0 0x1c4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) #define SDHCI_TEGRA_VNDR_TUN_STATUS0 0x1C8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) #define SDHCI_TEGRA_VNDR_TUN_STATUS1 0x1CC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) #define SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK 0xFF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) #define SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT 0x8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) #define TUNING_WORD_BIT_SIZE 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) #define SDHCI_TEGRA_AUTO_CAL_CONFIG 0x1e4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) #define SDHCI_AUTO_CAL_START BIT(31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) #define SDHCI_AUTO_CAL_ENABLE BIT(29)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) #define SDHCI_AUTO_CAL_PDPU_OFFSET_MASK 0x0000ffff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) #define SDHCI_TEGRA_SDMEM_COMP_PADCTRL 0x1e0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) #define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK 0x0000000f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) #define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL 0x7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) #define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD BIT(31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) #define SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK 0x07FFF000
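/*
 * Note (derived from the mask above and the shifts used in
 * tegra_sdhci_set_padctrl(), not from the TRM): the combined drive strength
 * offset field spans bits [26:12]; the pull-up offset is programmed at
 * bit 20 and the pull-down offset at bit 12.
 */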
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) #define SDHCI_TEGRA_AUTO_CAL_STATUS 0x1ec
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) #define SDHCI_TEGRA_AUTO_CAL_ACTIVE BIT(31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) #define NVQUIRK_FORCE_SDHCI_SPEC_200 BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) #define NVQUIRK_ENABLE_BLOCK_GAP_DET BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) #define NVQUIRK_ENABLE_SDHCI_SPEC_300 BIT(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) #define NVQUIRK_ENABLE_SDR50 BIT(3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) #define NVQUIRK_ENABLE_SDR104 BIT(4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) #define NVQUIRK_ENABLE_DDR50 BIT(5)
/*
 * NVQUIRK_HAS_PADCALIB is for SoCs that support automatic calibration of
 * pad drive strength.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) #define NVQUIRK_HAS_PADCALIB BIT(6)
/*
 * NVQUIRK_NEEDS_PAD_CONTROL is for SoCs that have separate 3V3 and 1V8 pads.
 * 3V3/1V8 pad selection happens through pinctrl state selection, depending
 * on the signaling mode.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) #define NVQUIRK_NEEDS_PAD_CONTROL BIT(7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) #define NVQUIRK_DIS_CARD_CLK_CONFIG_TAP BIT(8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) #define NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING BIT(9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112)
/*
 * NVQUIRK_HAS_TMCLK is for SoCs that have a separate timeout clock for the
 * Tegra SDMMC hardware data timeout.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) #define NVQUIRK_HAS_TMCLK BIT(10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) /* SDMMC CQE Base Address for Tegra Host Ver 4.1 and Higher */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) #define SDHCI_TEGRA_CQE_BASE_ADDR 0xF000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) #define SDHCI_TEGRA_CQE_TRNS_MODE (SDHCI_TRNS_MULTI | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) SDHCI_TRNS_BLK_CNT_EN | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) SDHCI_TRNS_DMA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) struct sdhci_tegra_soc_data {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) const struct sdhci_pltfm_data *pdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) u64 dma_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) u32 nvquirks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) u8 min_tap_delay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) u8 max_tap_delay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) /* Magic pull up and pull down pad calibration offsets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) struct sdhci_tegra_autocal_offsets {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) u32 pull_up_3v3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) u32 pull_down_3v3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) u32 pull_up_3v3_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) u32 pull_down_3v3_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) u32 pull_up_1v8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) u32 pull_down_1v8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) u32 pull_up_1v8_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) u32 pull_down_1v8_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) u32 pull_up_sdr104;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) u32 pull_down_sdr104;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) u32 pull_up_hs400;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) u32 pull_down_hs400;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) struct sdhci_tegra {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) const struct sdhci_tegra_soc_data *soc_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) struct gpio_desc *power_gpio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) struct clk *tmclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) bool ddr_signaling;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) bool pad_calib_required;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) bool pad_control_available;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) struct reset_control *rst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) struct pinctrl *pinctrl_sdmmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) struct pinctrl_state *pinctrl_state_3v3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) struct pinctrl_state *pinctrl_state_1v8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) struct pinctrl_state *pinctrl_state_3v3_drv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) struct pinctrl_state *pinctrl_state_1v8_drv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) struct sdhci_tegra_autocal_offsets autocal_offsets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) ktime_t last_calib;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) u32 default_tap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) u32 default_trim;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) u32 dqs_trim;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) bool enable_hwcq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) unsigned long curr_clk_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) u8 tuned_tap_delay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) if (unlikely((soc_data->nvquirks & NVQUIRK_FORCE_SDHCI_SPEC_200) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) (reg == SDHCI_HOST_VERSION))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) /* Erratum: Version register is invalid in HW. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) return SDHCI_SPEC_200;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) return readw(host->ioaddr + reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) static void tegra_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) switch (reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) case SDHCI_TRANSFER_MODE:
		/*
		 * Postpone this write; it must be done together with the
		 * command write below.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) pltfm_host->xfer_mode_shadow = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) case SDHCI_COMMAND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) writel((val << 16) | pltfm_host->xfer_mode_shadow,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) host->ioaddr + SDHCI_TRANSFER_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) writew(val, host->ioaddr + reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) }
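
/*
 * Sketch of the deferred write performed above, assuming the standard SDHCI
 * register layout where SDHCI_COMMAND sits two bytes above
 * SDHCI_TRANSFER_MODE: the shadowed 16-bit transfer mode and the 16-bit
 * command are issued as a single 32-bit access so both registers are
 * updated together:
 *
 *	writel((command << 16) | xfer_mode_shadow,
 *	       host->ioaddr + SDHCI_TRANSFER_MODE);
 */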
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217)
	/*
	 * Spurious timeout and CRC errors seem to be signalled by the
	 * hardware, so disable signalling of them. In case of real errors
	 * the software timers will eventually detect them.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) if (unlikely(reg == SDHCI_SIGNAL_ENABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) val &= ~(SDHCI_INT_TIMEOUT|SDHCI_INT_CRC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) writel(val, host->ioaddr + reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) if (unlikely((soc_data->nvquirks & NVQUIRK_ENABLE_BLOCK_GAP_DET) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) (reg == SDHCI_INT_ENABLE))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) /* Erratum: Must enable block gap interrupt detection */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) if (val & SDHCI_INT_CARD_INT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) gap_ctrl |= 0x8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) gap_ctrl &= ~0x8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) writeb(gap_ctrl, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238)
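/*
 * Gate or ungate the SD card clock via SDHCI_CLOCK_CONTROL and return the
 * previous enable state so that callers can restore it afterwards (see the
 * tuning and pad auto-calibration paths below).
 */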
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) static bool tegra_sdhci_configure_card_clk(struct sdhci_host *host, bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) bool status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) reg = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) status = !!(reg & SDHCI_CLOCK_CARD_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) if (status == enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) if (enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) reg |= SDHCI_CLOCK_CARD_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) reg &= ~SDHCI_CLOCK_CARD_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) sdhci_writew(host, reg, SDHCI_CLOCK_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) static void tegra210_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) {
	bool is_tuning_cmd = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) bool clk_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) u8 cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) if (reg == SDHCI_COMMAND) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) cmd = SDHCI_GET_CMD(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) is_tuning_cmd = cmd == MMC_SEND_TUNING_BLOCK ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) cmd == MMC_SEND_TUNING_BLOCK_HS200;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) if (is_tuning_cmd)
		clk_enabled = tegra_sdhci_configure_card_clk(host, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) writew(val, host->ioaddr + reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) if (is_tuning_cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) tegra_sdhci_configure_card_clk(host, clk_enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) {
	/*
	 * Assume the card is write-enabled if the GPIO is missing from a
	 * board's device tree, because SDHCI's WRITE_PROTECT bit doesn't
	 * work on Tegra.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) return mmc_gpio_get_ro(host->mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) static bool tegra_sdhci_is_pad_and_regulator_valid(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) int has_1v8, has_3v3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) * The SoCs which have NVQUIRK_NEEDS_PAD_CONTROL require software pad
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) * voltage configuration in order to perform voltage switching. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) * means that valid pinctrl info is required on SDHCI instances capable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) * of performing voltage switching. Whether or not an SDHCI instance is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) * capable of voltage switching is determined based on the regulator.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) if (!(tegra_host->soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) if (IS_ERR(host->mmc->supply.vqmmc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) has_1v8 = regulator_is_supported_voltage(host->mmc->supply.vqmmc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) 1700000, 1950000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) has_3v3 = regulator_is_supported_voltage(host->mmc->supply.vqmmc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) 2700000, 3600000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) if (has_1v8 == 1 && has_3v3 == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) return tegra_host->pad_control_available;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) /* Fixed voltage, no pad control required. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) static void tegra_sdhci_set_tap(struct sdhci_host *host, unsigned int tap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) bool card_clk_enabled = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) * Touching the tap values is a bit tricky on some SoC generations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) * The quirk enables a workaround for a glitch that sometimes occurs if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) * the tap values are changed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) card_clk_enabled = tegra_sdhci_configure_card_clk(host, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) reg &= ~SDHCI_CLOCK_CTRL_TAP_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) reg |= tap << SDHCI_CLOCK_CTRL_TAP_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) card_clk_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) tegra_sdhci_configure_card_clk(host, card_clk_enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) u32 misc_ctrl, clk_ctrl, pad_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) sdhci_reset(host, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) if (!(mask & SDHCI_RESET_ALL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) tegra_sdhci_set_tap(host, tegra_host->default_tap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) misc_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) clk_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) misc_ctrl &= ~(SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) SDHCI_MISC_CTRL_ENABLE_SDR50 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) SDHCI_MISC_CTRL_ENABLE_DDR50 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) SDHCI_MISC_CTRL_ENABLE_SDR104);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) clk_ctrl &= ~(SDHCI_CLOCK_CTRL_TRIM_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) if (tegra_sdhci_is_pad_and_regulator_valid(host)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) /* Erratum: Enable SDHCI spec v3.00 support */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) if (soc_data->nvquirks & NVQUIRK_ENABLE_SDHCI_SPEC_300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) /* Advertise UHS modes as supported by host */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR50;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) if (soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) clk_ctrl |= tegra_host->default_trim << SDHCI_CLOCK_CTRL_TRIM_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) sdhci_writel(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) sdhci_writel(host, clk_ctrl, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) pad_ctrl = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) pad_ctrl &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) pad_ctrl |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) sdhci_writel(host, pad_ctrl, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) tegra_host->pad_calib_required = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) tegra_host->ddr_signaling = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) static void tegra_sdhci_configure_cal_pad(struct sdhci_host *host, bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) * Enable or disable the additional I/O pad used by the drive strength
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) * calibration process.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) val = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) if (enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) val |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) val &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) sdhci_writel(host, val, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) if (enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) usleep_range(1, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) static void tegra_sdhci_set_pad_autocal_offset(struct sdhci_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) u16 pdpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) reg &= ~SDHCI_AUTO_CAL_PDPU_OFFSET_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) reg |= pdpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) static int tegra_sdhci_set_padctrl(struct sdhci_host *host, int voltage,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) bool state_drvupdn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) struct sdhci_tegra_autocal_offsets *offsets =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) &tegra_host->autocal_offsets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) struct pinctrl_state *pinctrl_drvupdn = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) u8 drvup = 0, drvdn = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) if (!state_drvupdn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) /* PADS Drive Strength */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) if (voltage == MMC_SIGNAL_VOLTAGE_180) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) if (tegra_host->pinctrl_state_1v8_drv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) pinctrl_drvupdn =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) tegra_host->pinctrl_state_1v8_drv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) drvup = offsets->pull_up_1v8_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) drvdn = offsets->pull_down_1v8_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) if (tegra_host->pinctrl_state_3v3_drv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) pinctrl_drvupdn =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) tegra_host->pinctrl_state_3v3_drv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) drvup = offsets->pull_up_3v3_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) drvdn = offsets->pull_down_3v3_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) if (pinctrl_drvupdn != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) pinctrl_drvupdn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) dev_err(mmc_dev(host->mmc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) "failed pads drvupdn, ret: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) } else if ((drvup) || (drvdn)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) reg = sdhci_readl(host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) reg &= ~SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) reg |= (drvup << 20) | (drvdn << 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) sdhci_writel(host, reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) /* Dual Voltage PADS Voltage selection */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) if (!tegra_host->pad_control_available)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) if (voltage == MMC_SIGNAL_VOLTAGE_180) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) tegra_host->pinctrl_state_1v8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) dev_err(mmc_dev(host->mmc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) "setting 1.8V failed, ret: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) tegra_host->pinctrl_state_3v3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) dev_err(mmc_dev(host->mmc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) "setting 3.3V failed, ret: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515)
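/*
 * Pad drive strength auto-calibration, as implemented below: program the
 * initial pull-up/pull-down offsets for the current timing and signaling
 * voltage, gate the card clock, power up the calibration pad, set the
 * ENABLE and START bits and poll AUTO_CAL_ACTIVE for up to 10 ms. If
 * calibration times out, auto-calibration is disabled and fixed drive
 * strengths are applied instead. Called from the clock and request paths.
 */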
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) static void tegra_sdhci_pad_autocalib(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) struct sdhci_tegra_autocal_offsets offsets =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) tegra_host->autocal_offsets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) struct mmc_ios *ios = &host->mmc->ios;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) bool card_clk_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) u16 pdpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) switch (ios->timing) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) case MMC_TIMING_UHS_SDR104:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) pdpu = offsets.pull_down_sdr104 << 8 | offsets.pull_up_sdr104;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) case MMC_TIMING_MMC_HS400:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) pdpu = offsets.pull_down_hs400 << 8 | offsets.pull_up_hs400;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) pdpu = offsets.pull_down_1v8 << 8 | offsets.pull_up_1v8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) pdpu = offsets.pull_down_3v3 << 8 | offsets.pull_up_3v3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) /* Set initial offset before auto-calibration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) tegra_sdhci_set_pad_autocal_offset(host, pdpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) card_clk_enabled = tegra_sdhci_configure_card_clk(host, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) tegra_sdhci_configure_cal_pad(host, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) reg |= SDHCI_AUTO_CAL_ENABLE | SDHCI_AUTO_CAL_START;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) usleep_range(1, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) /* 10 ms timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) ret = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_AUTO_CAL_STATUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) reg, !(reg & SDHCI_TEGRA_AUTO_CAL_ACTIVE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) 1000, 10000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) tegra_sdhci_configure_cal_pad(host, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) tegra_sdhci_configure_card_clk(host, card_clk_enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) dev_err(mmc_dev(host->mmc), "Pad autocal timed out\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) /* Disable automatic cal and use fixed Drive Strengths */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) reg &= ~SDHCI_AUTO_CAL_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) dev_err(mmc_dev(host->mmc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) "Setting drive strengths failed: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) static void tegra_sdhci_parse_pad_autocal_dt(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) struct sdhci_tegra_autocal_offsets *autocal =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) &tegra_host->autocal_offsets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) err = device_property_read_u32(host->mmc->parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) "nvidia,pad-autocal-pull-up-offset-3v3",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) &autocal->pull_up_3v3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) autocal->pull_up_3v3 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) err = device_property_read_u32(host->mmc->parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) "nvidia,pad-autocal-pull-down-offset-3v3",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) &autocal->pull_down_3v3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) autocal->pull_down_3v3 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) err = device_property_read_u32(host->mmc->parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) "nvidia,pad-autocal-pull-up-offset-1v8",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) &autocal->pull_up_1v8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) autocal->pull_up_1v8 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) err = device_property_read_u32(host->mmc->parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) "nvidia,pad-autocal-pull-down-offset-1v8",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) &autocal->pull_down_1v8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) autocal->pull_down_1v8 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) err = device_property_read_u32(host->mmc->parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) "nvidia,pad-autocal-pull-up-offset-sdr104",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) &autocal->pull_up_sdr104);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) autocal->pull_up_sdr104 = autocal->pull_up_1v8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) err = device_property_read_u32(host->mmc->parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) "nvidia,pad-autocal-pull-down-offset-sdr104",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) &autocal->pull_down_sdr104);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) autocal->pull_down_sdr104 = autocal->pull_down_1v8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) err = device_property_read_u32(host->mmc->parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) "nvidia,pad-autocal-pull-up-offset-hs400",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) &autocal->pull_up_hs400);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) autocal->pull_up_hs400 = autocal->pull_up_1v8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) err = device_property_read_u32(host->mmc->parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) "nvidia,pad-autocal-pull-down-offset-hs400",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) &autocal->pull_down_hs400);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) autocal->pull_down_hs400 = autocal->pull_down_1v8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633)
	/*
	 * Different fail-safe drive strength values based on the signaling
	 * voltage only apply to SoCs that support 3V3 and 1V8 pad controls,
	 * so avoid reading the device tree properties below for SoCs that
	 * don't have NVQUIRK_NEEDS_PAD_CONTROL.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) if (!(tegra_host->soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) err = device_property_read_u32(host->mmc->parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) "nvidia,pad-autocal-pull-up-offset-3v3-timeout",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) &autocal->pull_up_3v3_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) if (!IS_ERR(tegra_host->pinctrl_state_3v3) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) (tegra_host->pinctrl_state_3v3_drv == NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) mmc_hostname(host->mmc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) autocal->pull_up_3v3_timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) err = device_property_read_u32(host->mmc->parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) "nvidia,pad-autocal-pull-down-offset-3v3-timeout",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) &autocal->pull_down_3v3_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) if (!IS_ERR(tegra_host->pinctrl_state_3v3) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) (tegra_host->pinctrl_state_3v3_drv == NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) mmc_hostname(host->mmc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) autocal->pull_down_3v3_timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) err = device_property_read_u32(host->mmc->parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) "nvidia,pad-autocal-pull-up-offset-1v8-timeout",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) &autocal->pull_up_1v8_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) if (!IS_ERR(tegra_host->pinctrl_state_1v8) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) (tegra_host->pinctrl_state_1v8_drv == NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) mmc_hostname(host->mmc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) autocal->pull_up_1v8_timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) err = device_property_read_u32(host->mmc->parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) "nvidia,pad-autocal-pull-down-offset-1v8-timeout",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) &autocal->pull_down_1v8_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) if (!IS_ERR(tegra_host->pinctrl_state_1v8) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) (tegra_host->pinctrl_state_1v8_drv == NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) mmc_hostname(host->mmc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) autocal->pull_down_1v8_timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) }
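
/*
 * Illustrative device-tree fragment for the properties parsed above. The
 * property names match the code; the values and the unit address are
 * made-up placeholders, not recommendations:
 *
 *	mmc@700b0000 {
 *		nvidia,pad-autocal-pull-up-offset-3v3 = <0x00>;
 *		nvidia,pad-autocal-pull-down-offset-3v3 = <0x7d>;
 *		nvidia,pad-autocal-pull-up-offset-1v8 = <0x7b>;
 *		nvidia,pad-autocal-pull-down-offset-1v8 = <0x7b>;
 *	};
 */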
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) static void tegra_sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) struct sdhci_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) ktime_t since_calib = ktime_sub(ktime_get(), tegra_host->last_calib);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) /* 100 ms calibration interval is specified in the TRM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) if (ktime_to_ms(since_calib) > 100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) tegra_sdhci_pad_autocalib(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) tegra_host->last_calib = ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) sdhci_request(mmc, mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) static void tegra_sdhci_parse_tap_and_trim(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) err = device_property_read_u32(host->mmc->parent, "nvidia,default-tap",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) &tegra_host->default_tap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) tegra_host->default_tap = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) err = device_property_read_u32(host->mmc->parent, "nvidia,default-trim",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) &tegra_host->default_trim);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) tegra_host->default_trim = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) err = device_property_read_u32(host->mmc->parent, "nvidia,dqs-trim",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) &tegra_host->dqs_trim);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) tegra_host->dqs_trim = 0x11;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) }
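
/*
 * Illustrative values (placeholders, not recommendations) for the
 * properties parsed above:
 *
 *	nvidia,default-tap = <0x2>;
 *	nvidia,default-trim = <0x4>;
 *	nvidia,dqs-trim = <0xb>;
 */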
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) static void tegra_sdhci_parse_dt(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730)
	tegra_host->enable_hwcq = device_property_read_bool(host->mmc->parent,
							     "supports-cqe");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) tegra_sdhci_parse_pad_autocal_dt(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) tegra_sdhci_parse_tap_and_trim(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) unsigned long host_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) if (!clock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) return sdhci_set_clock(host, clock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) * In DDR50/52 modes the Tegra SDHCI controllers require the SDHCI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) * divider to be configured to divided the host clock by two. The SDHCI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) * clock divider is calculated as part of sdhci_set_clock() by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) * sdhci_calc_clk(). The divider is calculated from host->max_clk and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) * the requested clock rate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) * By setting the host->max_clk to clock * 2 the divider calculation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) * will always result in the correct value for DDR50/52 modes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) * regardless of clock rate rounding, which may happen if the value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) * from clk_get_rate() is used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) */
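	/*
	 * For example, with DDR52 at a 52 MHz bus clock the source clock and
	 * host->max_clk are both set to 104 MHz, so sdhci_calc_clk() always
	 * picks a divider of 2 for the requested 52 MHz, as required for the
	 * DDR modes.
	 */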
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) host_clk = tegra_host->ddr_signaling ? clock * 2 : clock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) clk_set_rate(pltfm_host->clk, host_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) tegra_host->curr_clk_rate = host_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) if (tegra_host->ddr_signaling)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) host->max_clk = host_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) host->max_clk = clk_get_rate(pltfm_host->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) sdhci_set_clock(host, clock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) if (tegra_host->pad_calib_required) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) tegra_sdhci_pad_autocalib(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) tegra_host->pad_calib_required = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) static void tegra_sdhci_hs400_enhanced_strobe(struct mmc_host *mmc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) struct mmc_ios *ios)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) struct sdhci_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) if (ios->enhanced_strobe) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) val |= SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) * When CMD13 is sent from mmc_select_hs400es() after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) * switching to HS400ES mode, the bus is operating at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) * either MMC_HIGH_26_MAX_DTR or MMC_HIGH_52_MAX_DTR.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) * To meet the Tegra SDHCI requirement in HS400ES mode, force the SDHCI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) * interface clock to MMC_HS200_MAX_DTR (200 MHz) so that the host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) * controller CAR clock and the interface clock are rate matched.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) tegra_sdhci_set_clock(host, MMC_HS200_MAX_DTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) val &= ~SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) static unsigned int tegra_sdhci_get_max_clock(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) return clk_round_rate(pltfm_host->clk, UINT_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
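/* Program the HS400 DQS trim value into the vendor CAP_OVERRIDES register. */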
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) static void tegra_sdhci_set_dqs_trim(struct sdhci_host *host, u8 trim)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) val &= ~SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) val |= trim << SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) static void tegra_sdhci_hs400_dll_cal(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824)
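	/* Kick off the DLL calibration by setting the CALIBRATE bit. */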
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_DLLCAL_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) reg |= SDHCI_TEGRA_DLLCAL_CALIBRATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_DLLCAL_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) /* 1 ms sleep, 5 ms timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) err = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_VENDOR_DLLCAL_STA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) reg, !(reg & SDHCI_TEGRA_DLLCAL_STA_ACTIVE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) 1000, 5000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) dev_err(mmc_dev(host->mmc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) "HS400 delay line calibration timed out\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) static void tegra_sdhci_tap_correction(struct sdhci_host *host, u8 thd_up,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) u8 thd_low, u8 fixed_tap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) u32 val, tun_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) u8 word, bit, edge1, tap, window;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) bool tap_result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) bool start_fail = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) bool start_pass = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) bool end_pass = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) bool first_fail = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) bool first_pass = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) u8 start_pass_tap = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) u8 end_pass_tap = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) u8 first_fail_tap = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) u8 first_pass_tap = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) u8 total_tuning_words = host->tuning_loop_count / TUNING_WORD_BIT_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) * Read the auto-tuned results and extract a valid passing window by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) * filtering out unwanted bubble/partial/merged windows.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) for (word = 0; word < total_tuning_words; word++) {
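		/* Select the tuning word to inspect and read back its pass/fail status. */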
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) val &= ~SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) val |= word;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) tun_status = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) bit = 0;
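		/*
		 * Scan the taps in this word for a fail -> pass -> fail
		 * pattern; the passing stretch in between is a candidate
		 * tuning window.
		 */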
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) while (bit < TUNING_WORD_BIT_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) tap = word * TUNING_WORD_BIT_SIZE + bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) tap_result = tun_status & (1 << bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) if (!tap_result && !start_fail) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) start_fail = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) if (!first_fail) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) first_fail_tap = tap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) first_fail = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) } else if (tap_result && start_fail && !start_pass) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) start_pass_tap = tap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) start_pass = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) if (!first_pass) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) first_pass_tap = tap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) first_pass = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) } else if (!tap_result && start_fail && start_pass &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) !end_pass) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) end_pass_tap = tap - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) end_pass = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) } else if (tap_result && start_pass && start_fail &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) end_pass) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) window = end_pass_tap - start_pass_tap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) /* discard merged window and bubble window */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) if (window >= thd_up || window < thd_low) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) start_pass_tap = tap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) end_pass = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) /* set tap at middle of valid window */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) tap = start_pass_tap + window / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) tegra_host->tuned_tap_delay = tap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) bit++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) if (!first_fail) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) WARN(1, "no edge detected, continue with HW tuned delay.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) } else if (first_pass) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) /* set tap location at fixed tap relative to the first edge */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) edge1 = first_fail_tap + (first_pass_tap - first_fail_tap) / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) if (edge1 - 1 > fixed_tap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) tegra_host->tuned_tap_delay = edge1 - fixed_tap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) tegra_host->tuned_tap_delay = edge1 + fixed_tap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) static void tegra_sdhci_post_tuning(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) u32 avg_tap_dly, val, min_tap_dly, max_tap_dly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) u8 fixed_tap, start_tap, end_tap, window_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) u8 thdupper, thdlower;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) u8 num_iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) u32 clk_rate_mhz, period_ps, bestcase, worstcase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) /* retain the HW tuned tap to use in case no correction is needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) tegra_host->tuned_tap_delay = (val & SDHCI_CLOCK_CTRL_TAP_MASK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) SDHCI_CLOCK_CTRL_TAP_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) if (soc_data->min_tap_delay && soc_data->max_tap_delay) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) min_tap_dly = soc_data->min_tap_delay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) max_tap_dly = soc_data->max_tap_delay;
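		/*
		 * Derive the clock period in ps and the best/worst-case
		 * number of taps per period from the SoC min/max tap delays.
		 */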
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) clk_rate_mhz = tegra_host->curr_clk_rate / USEC_PER_SEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) period_ps = USEC_PER_SEC / clk_rate_mhz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) bestcase = period_ps / min_tap_dly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) worstcase = period_ps / max_tap_dly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) * Upper and lower bound thresholds are used to detect merged and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) * bubble windows.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) thdupper = (2 * worstcase + bestcase) / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) thdlower = worstcase / 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) * The fixed tap is used when the HW tuning result contains a single
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) * edge; the tap is then set at a fixed delay relative to the first edge.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) avg_tap_dly = (period_ps * 2) / (min_tap_dly + max_tap_dly);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) fixed_tap = avg_tap_dly / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
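		/* Read the start and end taps of the window found by HW tuning. */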
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) val = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) start_tap = val & SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) end_tap = (val >> SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) window_width = end_tap - start_tap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) num_iter = host->tuning_loop_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) * A partial window includes the edges of the tuning range.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) * A merged window includes more taps, so its width is higher
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) * than the upper threshold.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) if (start_tap == 0 || (end_tap == (num_iter - 1)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) (end_tap == num_iter - 2) || window_width >= thdupper) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) pr_debug("%s: Apply tuning correction\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) mmc_hostname(host->mmc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) tegra_sdhci_tap_correction(host, thdupper, thdlower,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) fixed_tap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) static int tegra_sdhci_execute_hw_tuning(struct mmc_host *mmc, u32 opcode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) struct sdhci_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) err = sdhci_execute_tuning(mmc, opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) if (!err && !host->tuning_err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) tegra_sdhci_post_tuning(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) static void tegra_sdhci_set_uhs_signaling(struct sdhci_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) unsigned timing)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) bool set_default_tap = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) bool set_dqs_trim = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) bool do_hs400_dll_cal = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) u8 iter = TRIES_256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) tegra_host->ddr_signaling = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) switch (timing) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) case MMC_TIMING_UHS_SDR50:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) case MMC_TIMING_UHS_SDR104:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) case MMC_TIMING_MMC_HS200:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) /* Don't set default tap on tunable modes. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) iter = TRIES_128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) case MMC_TIMING_MMC_HS400:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) set_dqs_trim = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) do_hs400_dll_cal = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) iter = TRIES_128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) case MMC_TIMING_MMC_DDR52:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) case MMC_TIMING_UHS_DDR50:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) tegra_host->ddr_signaling = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) set_default_tap = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) set_default_tap = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
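	/* Program the number of tuning iterations, the start tap value and the multiplier. */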
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) val &= ~(SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) val |= (iter << SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 0 << SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 1 << SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) sdhci_writel(host, 0, SDHCI_TEGRA_VNDR_TUN_CTRL1_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) host->tuning_loop_count = (iter == TRIES_128) ? 128 : 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) sdhci_set_uhs_signaling(host, timing);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) tegra_sdhci_pad_autocalib(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) if (tegra_host->tuned_tap_delay && !set_default_tap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) tegra_sdhci_set_tap(host, tegra_host->default_tap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) if (set_dqs_trim)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) tegra_sdhci_set_dqs_trim(host, tegra_host->dqs_trim);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) if (do_hs400_dll_cal)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) tegra_sdhci_hs400_dll_cal(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) static int tegra_sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) unsigned int min, max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) * Start the search for the minimum tap value at 10, as smaller values
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) * may wrongly be reported as working but fail at higher speeds,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) * according to the TRM.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) min = 10;
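	/* Find the smallest tap value that passes tuning. */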
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) while (min < 255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) tegra_sdhci_set_tap(host, min);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) if (!mmc_send_tuning(host->mmc, opcode, NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) min++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) /* Find the maximum tap value that still passes. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) max = min + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) while (max < 255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) tegra_sdhci_set_tap(host, max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) if (mmc_send_tuning(host->mmc, opcode, NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) max--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) max++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) /* The TRM states the ideal tap value is at 75% in the passing range. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) tegra_sdhci_set_tap(host, min + ((max - min) * 3 / 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) return mmc_send_tuning(host->mmc, opcode, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) static int sdhci_tegra_start_signal_voltage_switch(struct mmc_host *mmc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) struct mmc_ios *ios)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) struct sdhci_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)
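	/*
	 * Ordering matters: for 3.3 V configure the pads before switching the
	 * signaling voltage, for 1.8 V switch the voltage first and then
	 * reconfigure the pads.
	 */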
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) ret = sdhci_start_signal_voltage_switch(mmc, ios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) } else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) ret = sdhci_start_signal_voltage_switch(mmc, ios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) if (tegra_host->pad_calib_required)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) tegra_sdhci_pad_autocalib(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) static int tegra_sdhci_init_pinctrl_info(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) struct sdhci_tegra *tegra_host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) tegra_host->pinctrl_sdmmc = devm_pinctrl_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) if (IS_ERR(tegra_host->pinctrl_sdmmc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) dev_dbg(dev, "No pinctrl info, err: %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) PTR_ERR(tegra_host->pinctrl_sdmmc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
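	/*
	 * The sdmmc-1v8-drv and sdmmc-3v3-drv states are optional; -ENODEV
	 * just means the state is not defined.
	 */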
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) tegra_host->pinctrl_state_1v8_drv = pinctrl_lookup_state(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) tegra_host->pinctrl_sdmmc, "sdmmc-1v8-drv");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) if (IS_ERR(tegra_host->pinctrl_state_1v8_drv)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) if (PTR_ERR(tegra_host->pinctrl_state_1v8_drv) == -ENODEV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) tegra_host->pinctrl_state_1v8_drv = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) tegra_host->pinctrl_state_3v3_drv = pinctrl_lookup_state(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) tegra_host->pinctrl_sdmmc, "sdmmc-3v3-drv");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) if (IS_ERR(tegra_host->pinctrl_state_3v3_drv)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) if (PTR_ERR(tegra_host->pinctrl_state_3v3_drv) == -ENODEV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) tegra_host->pinctrl_state_3v3_drv = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) tegra_host->pinctrl_state_3v3 =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-3v3");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) if (IS_ERR(tegra_host->pinctrl_state_3v3)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) dev_warn(dev, "Missing 3.3V pad state, err: %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) PTR_ERR(tegra_host->pinctrl_state_3v3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) tegra_host->pinctrl_state_1v8 =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-1v8");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) if (IS_ERR(tegra_host->pinctrl_state_1v8)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) dev_warn(dev, "Missing 1.8V pad state, err: %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) PTR_ERR(tegra_host->pinctrl_state_1v8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) tegra_host->pad_control_available = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) static void tegra_sdhci_voltage_switch(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) tegra_host->pad_calib_required = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) static void tegra_cqhci_writel(struct cqhci_host *cq_host, u32 val, int reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) struct mmc_host *mmc = cq_host->mmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) struct sdhci_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) u8 ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) ktime_t timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) bool timed_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) * During CQE resume/unhalt, CQHCI driver unhalts CQE prior to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) * cqhci_host_ops enable where SDHCI DMA and BLOCK_SIZE registers need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) * to be re-configured.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) * Tegra CQHCI/SDHCI prevents write access to block size register when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) * CQE is unhalted. So handle the CQE resume sequence here to configure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) * the SDHCI block registers prior to exiting the CQE halt state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) if (reg == CQHCI_CTL && !(val & CQHCI_HALT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) sdhci_cqe_enable(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) writel(val, cq_host->mmio + reg);
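		/* Poll for up to 50 us for the controller to leave the halt state. */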
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) timeout = ktime_add_us(ktime_get(), 50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) timed_out = ktime_compare(ktime_get(), timeout) > 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) ctrl = cqhci_readl(cq_host, CQHCI_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) if (!(ctrl & CQHCI_HALT) || timed_out)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) * CQE usually resumes very quickly, but in case the Tegra CQE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) * doesn't resume, retry the unhalt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) if (timed_out)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) writel(val, cq_host->mmio + reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) writel(val, cq_host->mmio + reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) static void sdhci_tegra_update_dcmd_desc(struct mmc_host *mmc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) struct mmc_request *mrq, u64 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) struct sdhci_pltfm_host *pltfm_host = sdhci_priv(mmc_priv(mmc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) if (soc_data->nvquirks & NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) mrq->cmd->flags & MMC_RSP_R1B)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) *data |= CQHCI_CMD_TIMING(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) static void sdhci_tegra_cqe_enable(struct mmc_host *mmc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) struct cqhci_host *cq_host = mmc->cqe_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) struct sdhci_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) * Tegra CQHCI/SDMMC design prevents write access to sdhci block size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) * register when CQE is enabled and unhalted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) * The CQHCI driver enables CQE prior to activation, so disable CQE before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) * programming the block size in the SDHCI controller, then re-enable it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) if (!cq_host->activated) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) val = cqhci_readl(cq_host, CQHCI_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) if (val & CQHCI_ENABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) cqhci_writel(cq_host, (val & ~CQHCI_ENABLE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) CQHCI_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) sdhci_cqe_enable(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) if (val & CQHCI_ENABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) cqhci_writel(cq_host, val, CQHCI_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) * CMD CRC errors are sometimes seen with some eMMC devices when a status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) * command is sent during transfer of the last data block, which is the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) * default case as the send status command block counter (CBC) is 1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) * The recommended fix is to set CBC to 0, allowing the status command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) * only when the data lines are idle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) val = cqhci_readl(cq_host, CQHCI_SSC1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) val &= ~CQHCI_SSC1_CBC_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) cqhci_writel(cq_host, val, CQHCI_SSC1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) static void sdhci_tegra_dumpregs(struct mmc_host *mmc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) sdhci_dumpregs(mmc_priv(mmc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) static u32 sdhci_tegra_cqhci_irq(struct sdhci_host *host, u32 intmask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) int cmd_error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) int data_error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) return intmask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) cqhci_irq(host->mmc, intmask, cmd_error, data_error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) static void tegra_sdhci_set_timeout(struct sdhci_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) struct mmc_command *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) * The HW busy detection timeout is based on the programmed data timeout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) * counter and the maximum supported timeout is 11s, which may not be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) * enough for long operations like cache flush, sleep awake or erase.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) * The ERASE_TIMEOUT_LIMIT bit of the VENDOR_MISC_CTRL register allows the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) * host controller to wait in the busy state for as long as the card is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) * busy, without a HW timeout.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) * So, use infinite busy wait mode for operations that may take more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) * than the maximum HW busy timeout of 11s, otherwise use finite busy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) * wait mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) if (cmd && cmd->busy_timeout >= 11 * MSEC_PER_SEC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) val |= SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) val &= ~SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_MISC_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) __sdhci_set_timeout(host, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) static void sdhci_tegra_cqe_pre_enable(struct mmc_host *mmc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) struct cqhci_host *cq_host = mmc->cqe_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) reg = cqhci_readl(cq_host, CQHCI_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) reg |= CQHCI_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) cqhci_writel(cq_host, reg, CQHCI_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) static void sdhci_tegra_cqe_post_disable(struct mmc_host *mmc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) struct cqhci_host *cq_host = mmc->cqe_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) struct sdhci_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) reg = cqhci_readl(cq_host, CQHCI_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) reg &= ~CQHCI_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) cqhci_writel(cq_host, reg, CQHCI_CFG);
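	/* Clear the transfer mode register that was programmed for CQE transfers. */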
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) .write_l = tegra_cqhci_writel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) .enable = sdhci_tegra_cqe_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) .disable = sdhci_cqe_disable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) .dumpregs = sdhci_tegra_dumpregs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) .update_dcmd_desc = sdhci_tegra_update_dcmd_desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) .pre_enable = sdhci_tegra_cqe_pre_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) .post_disable = sdhci_tegra_cqe_post_disable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) static int tegra_sdhci_set_dma_mask(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) struct sdhci_pltfm_host *platform = sdhci_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) struct sdhci_tegra *tegra = sdhci_pltfm_priv(platform);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) const struct sdhci_tegra_soc_data *soc = tegra->soc_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) struct device *dev = mmc_dev(host->mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)
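	/* Apply the SoC-specific DMA addressing limit, if one is specified. */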
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) if (soc->dma_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) return dma_set_mask_and_coherent(dev, soc->dma_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) static const struct sdhci_ops tegra_sdhci_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) .get_ro = tegra_sdhci_get_ro,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) .read_w = tegra_sdhci_readw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) .write_l = tegra_sdhci_writel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) .set_clock = tegra_sdhci_set_clock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) .set_dma_mask = tegra_sdhci_set_dma_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) .set_bus_width = sdhci_set_bus_width,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) .reset = tegra_sdhci_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) .platform_execute_tuning = tegra_sdhci_execute_tuning,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) .set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) .voltage_switch = tegra_sdhci_voltage_switch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) .get_max_clock = tegra_sdhci_get_max_clock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) static const struct sdhci_pltfm_data sdhci_tegra20_pdata = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) SDHCI_QUIRK_SINGLE_POWER_WRITE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) SDHCI_QUIRK_NO_HISPD_BIT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) .ops = &tegra_sdhci_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) static const struct sdhci_tegra_soc_data soc_data_tegra20 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) .pdata = &sdhci_tegra20_pdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) .dma_mask = DMA_BIT_MASK(32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) .nvquirks = NVQUIRK_FORCE_SDHCI_SPEC_200 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) NVQUIRK_ENABLE_BLOCK_GAP_DET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) static const struct sdhci_pltfm_data sdhci_tegra30_pdata = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) SDHCI_QUIRK_SINGLE_POWER_WRITE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) SDHCI_QUIRK_NO_HISPD_BIT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) SDHCI_QUIRK2_BROKEN_HS200 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) * Auto-CMD23 leads to "Got command interrupt 0x00010000 even
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) * though no command operation was in progress."
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) * The exact reason is unknown, as the same hardware seems
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) * to support Auto CMD23 on a downstream 3.1 kernel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) SDHCI_QUIRK2_ACMD23_BROKEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) .ops = &tegra_sdhci_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) static const struct sdhci_tegra_soc_data soc_data_tegra30 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) .pdata = &sdhci_tegra30_pdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) .dma_mask = DMA_BIT_MASK(32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) .nvquirks = NVQUIRK_ENABLE_SDHCI_SPEC_300 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) NVQUIRK_ENABLE_SDR50 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) NVQUIRK_ENABLE_SDR104 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) NVQUIRK_HAS_PADCALIB,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) static const struct sdhci_ops tegra114_sdhci_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) .get_ro = tegra_sdhci_get_ro,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) .read_w = tegra_sdhci_readw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) .write_w = tegra_sdhci_writew,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) .write_l = tegra_sdhci_writel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) .set_clock = tegra_sdhci_set_clock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) .set_dma_mask = tegra_sdhci_set_dma_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) .set_bus_width = sdhci_set_bus_width,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) .reset = tegra_sdhci_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) .platform_execute_tuning = tegra_sdhci_execute_tuning,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) .set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) .voltage_switch = tegra_sdhci_voltage_switch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) .get_max_clock = tegra_sdhci_get_max_clock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) static const struct sdhci_pltfm_data sdhci_tegra114_pdata = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) SDHCI_QUIRK_SINGLE_POWER_WRITE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) SDHCI_QUIRK_NO_HISPD_BIT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) .ops = &tegra114_sdhci_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) static const struct sdhci_tegra_soc_data soc_data_tegra114 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) .pdata = &sdhci_tegra114_pdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) .dma_mask = DMA_BIT_MASK(32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) static const struct sdhci_pltfm_data sdhci_tegra124_pdata = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) SDHCI_QUIRK_SINGLE_POWER_WRITE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) SDHCI_QUIRK_NO_HISPD_BIT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) .ops = &tegra114_sdhci_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) .pdata = &sdhci_tegra124_pdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) .dma_mask = DMA_BIT_MASK(34),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) static const struct sdhci_ops tegra210_sdhci_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) .get_ro = tegra_sdhci_get_ro,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) .read_w = tegra_sdhci_readw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) .write_w = tegra210_sdhci_writew,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) .write_l = tegra_sdhci_writel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) .set_clock = tegra_sdhci_set_clock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) .set_dma_mask = tegra_sdhci_set_dma_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) .set_bus_width = sdhci_set_bus_width,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) .reset = tegra_sdhci_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) .set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) .voltage_switch = tegra_sdhci_voltage_switch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) .get_max_clock = tegra_sdhci_get_max_clock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) .set_timeout = tegra_sdhci_set_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) static const struct sdhci_pltfm_data sdhci_tegra210_pdata = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) SDHCI_QUIRK_SINGLE_POWER_WRITE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) SDHCI_QUIRK_NO_HISPD_BIT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) .ops = &tegra210_sdhci_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) .pdata = &sdhci_tegra210_pdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) .dma_mask = DMA_BIT_MASK(34),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) .nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) NVQUIRK_HAS_PADCALIB |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) NVQUIRK_ENABLE_SDR50 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) NVQUIRK_ENABLE_SDR104 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) NVQUIRK_HAS_TMCLK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) .min_tap_delay = 106,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) .max_tap_delay = 185,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) static const struct sdhci_ops tegra186_sdhci_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) .get_ro = tegra_sdhci_get_ro,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) .read_w = tegra_sdhci_readw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) .write_l = tegra_sdhci_writel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) .set_clock = tegra_sdhci_set_clock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) .set_dma_mask = tegra_sdhci_set_dma_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) .set_bus_width = sdhci_set_bus_width,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) .reset = tegra_sdhci_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) .set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) .voltage_switch = tegra_sdhci_voltage_switch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) .get_max_clock = tegra_sdhci_get_max_clock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) .irq = sdhci_tegra_cqhci_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) .set_timeout = tegra_sdhci_set_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) static const struct sdhci_pltfm_data sdhci_tegra186_pdata = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) SDHCI_QUIRK_SINGLE_POWER_WRITE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) SDHCI_QUIRK_NO_HISPD_BIT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) .ops = &tegra186_sdhci_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) static const struct sdhci_tegra_soc_data soc_data_tegra186 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) .pdata = &sdhci_tegra186_pdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) .dma_mask = DMA_BIT_MASK(40),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) .nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) NVQUIRK_HAS_PADCALIB |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) NVQUIRK_ENABLE_SDR50 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) NVQUIRK_ENABLE_SDR104 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) NVQUIRK_HAS_TMCLK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) .min_tap_delay = 84,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) .max_tap_delay = 136,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)
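^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) * Tegra194 reuses the Tegra186 platform data but advertises a 39-bit DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) * mask, uses different tap-delay bounds and drops the DCMD R1B command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) * timing quirk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) */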
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) static const struct sdhci_tegra_soc_data soc_data_tegra194 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) .pdata = &sdhci_tegra186_pdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) .dma_mask = DMA_BIT_MASK(39),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) .nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) NVQUIRK_HAS_PADCALIB |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) NVQUIRK_ENABLE_SDR50 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) NVQUIRK_ENABLE_SDR104 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) NVQUIRK_HAS_TMCLK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) .min_tap_delay = 96,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) .max_tap_delay = 139,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) static const struct of_device_id sdhci_tegra_dt_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) { .compatible = "nvidia,tegra194-sdhci", .data = &soc_data_tegra194 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) { .compatible = "nvidia,tegra186-sdhci", .data = &soc_data_tegra186 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) { .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra210 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) { .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra124 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) { .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) { .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) { .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) MODULE_DEVICE_TABLE(of, sdhci_tegra_dt_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553)
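^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) * Register the SDHCI host. When hardware command queueing is enabled,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) * switch the controller to v4 mode and attach a CQHCI host at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) * SDHCI_TEGRA_CQE_BASE_ADDR before completing registration; otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) * fall back to the plain sdhci_add_host() path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) */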
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) static int sdhci_tegra_add_host(struct sdhci_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) struct cqhci_host *cq_host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) bool dma64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) if (!tegra_host->enable_hwcq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) return sdhci_add_host(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) sdhci_enable_v4_mode(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) ret = sdhci_setup_host(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) cq_host = devm_kzalloc(host->mmc->parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) sizeof(*cq_host), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) if (!cq_host) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) cq_host->mmio = host->ioaddr + SDHCI_TEGRA_CQE_BASE_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) cq_host->ops = &sdhci_tegra_cqhci_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) if (dma64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) ret = cqhci_init(cq_host, host->mmc, dma64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) ret = __sdhci_add_host(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) cleanup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) sdhci_cleanup_host(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601)
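^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) * Platform probe: pick the SoC data from the DT match, set up pad control,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) * clocks and the module reset, then register the host (with optional CQE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) * support) via sdhci_tegra_add_host().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) */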
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) static int sdhci_tegra_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) const struct of_device_id *match;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) const struct sdhci_tegra_soc_data *soc_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) struct sdhci_host *host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) struct sdhci_pltfm_host *pltfm_host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) struct sdhci_tegra *tegra_host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) struct clk *clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) match = of_match_device(sdhci_tegra_dt_match, &pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) if (!match)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) soc_data = match->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) host = sdhci_pltfm_init(pdev, soc_data->pdata, sizeof(*tegra_host));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) if (IS_ERR(host))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) return PTR_ERR(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) pltfm_host = sdhci_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) tegra_host = sdhci_pltfm_priv(pltfm_host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) tegra_host->ddr_signaling = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) tegra_host->pad_calib_required = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) tegra_host->pad_control_available = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) tegra_host->soc_data = soc_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) if (soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) rc = tegra_sdhci_init_pinctrl_info(&pdev->dev, tegra_host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) if (rc == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) host->mmc_host_ops.start_signal_voltage_switch =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) sdhci_tegra_start_signal_voltage_switch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) /* Hook to periodically rerun pad calibration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) host->mmc_host_ops.request = tegra_sdhci_request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) host->mmc_host_ops.hs400_enhanced_strobe =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) tegra_sdhci_hs400_enhanced_strobe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641)
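^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) /* Use the Tegra HW tuning procedure unless the SoC ops provide their own. */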
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) if (!host->ops->platform_execute_tuning)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) host->mmc_host_ops.execute_tuning =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) tegra_sdhci_execute_hw_tuning;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) rc = mmc_of_parse(host->mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) goto err_parse_dt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) if (tegra_host->soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) host->mmc->caps |= MMC_CAP_1_8V_DDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) /* HW busy detection is supported, but R1B responses are required. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) tegra_sdhci_parse_dt(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) tegra_host->power_gpio = devm_gpiod_get_optional(&pdev->dev, "power",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) GPIOD_OUT_HIGH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) if (IS_ERR(tegra_host->power_gpio)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) rc = PTR_ERR(tegra_host->power_gpio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) goto err_power_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) * Tegra210 has a separate SDMMC_LEGACY_TM clock that can serve as the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) * host timeout clock; software selects either TMCLK or SDCLK for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) * hardware data timeout through the USE_TMCLK_FOR_DATA_TIMEOUT bit of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) * the SDHCI_TEGRA_VENDOR_SYS_SW_CTRL register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) * The USE_TMCLK_FOR_DATA_TIMEOUT bit defaults to 1, so SDMMC uses the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) * 12 MHz TMCLK, which is advertised in the host capability register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) * A 12 MHz TMCLK allows a maximum data timeout period of 11 s, which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) * is longer than what SDCLK can provide.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) * Therefore TMCLK is set to 12 MHz and kept enabled at all times on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) * SoCs that support a separate TMCLK.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) if (soc_data->nvquirks & NVQUIRK_HAS_TMCLK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) clk = devm_clk_get(&pdev->dev, "tmclk");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) if (IS_ERR(clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) rc = PTR_ERR(clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) if (rc == -EPROBE_DEFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) goto err_power_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) dev_warn(&pdev->dev, "failed to get tmclk: %d\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) clk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) clk_set_rate(clk, 12000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) rc = clk_prepare_enable(clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) "failed to enable tmclk: %d\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) goto err_power_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) tegra_host->tmclk = clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) clk = devm_clk_get(mmc_dev(host->mmc), NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) if (IS_ERR(clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) rc = dev_err_probe(&pdev->dev, PTR_ERR(clk),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) "failed to get clock\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) goto err_clk_get;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) rc = clk_prepare_enable(clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) goto err_clk_get;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) pltfm_host->clk = clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) tegra_host->rst = devm_reset_control_get_exclusive(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) "sdhci");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) if (IS_ERR(tegra_host->rst)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) rc = PTR_ERR(tegra_host->rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) dev_err(&pdev->dev, "failed to get reset control: %d\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) goto err_rst_get;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718)
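^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) /* Pulse the module reset (assert, wait, deassert, wait) before adding the host. */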
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) rc = reset_control_assert(tegra_host->rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) goto err_rst_get;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) usleep_range(2000, 4000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) rc = reset_control_deassert(tegra_host->rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) goto err_rst_get;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) usleep_range(2000, 4000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) rc = sdhci_tegra_add_host(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) goto err_add_host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) err_add_host:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) reset_control_assert(tegra_host->rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) err_rst_get:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) clk_disable_unprepare(pltfm_host->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) err_clk_get:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) clk_disable_unprepare(tegra_host->tmclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) err_power_req:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) err_parse_dt:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) sdhci_pltfm_free(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) static int sdhci_tegra_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) struct sdhci_host *host = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) sdhci_remove_host(host, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) reset_control_assert(tegra_host->rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) usleep_range(2000, 4000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) clk_disable_unprepare(pltfm_host->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) clk_disable_unprepare(tegra_host->tmclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) sdhci_pltfm_free(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766)
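^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) * System sleep support: suspend quiesces the CQE (if enabled) and the SDHCI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) * host before gating the module clock; resume re-enables the clock and then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) * restores the host and CQE, unwinding on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) */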
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) static int __maybe_unused sdhci_tegra_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) struct sdhci_host *host = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) if (host->mmc->caps2 & MMC_CAP2_CQE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) ret = cqhci_suspend(host->mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) ret = sdhci_suspend_host(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) cqhci_resume(host->mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) clk_disable_unprepare(pltfm_host->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) static int __maybe_unused sdhci_tegra_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) struct sdhci_host *host = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) ret = clk_prepare_enable(pltfm_host->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) ret = sdhci_resume_host(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) goto disable_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) if (host->mmc->caps2 & MMC_CAP2_CQE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) ret = cqhci_resume(host->mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) goto suspend_host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) suspend_host:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) sdhci_suspend_host(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) disable_clk:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) clk_disable_unprepare(pltfm_host->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) static SIMPLE_DEV_PM_OPS(sdhci_tegra_dev_pm_ops, sdhci_tegra_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) sdhci_tegra_resume);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) static struct platform_driver sdhci_tegra_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) .name = "sdhci-tegra",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) .probe_type = PROBE_PREFER_ASYNCHRONOUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) .of_match_table = sdhci_tegra_dt_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) .pm = &sdhci_tegra_dev_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) .probe = sdhci_tegra_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) .remove = sdhci_tegra_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) module_platform_driver(sdhci_tegra_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) MODULE_DESCRIPTION("SDHCI driver for Tegra");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) MODULE_AUTHOR("Google, Inc.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) MODULE_LICENSE("GPL v2");