^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright (c) 2014-2015 MediaTek Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Author: Chaotian.Jing <chaotian.jing@mediatek.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/clk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/iopoll.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/ioport.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/of_address.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/of_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/of_irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/of_gpio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/pinctrl/consumer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/pm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/pm_runtime.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/regulator/consumer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/spinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <linux/reset.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <linux/mmc/card.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <linux/mmc/core.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <linux/mmc/host.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #include <linux/mmc/mmc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #include <linux/mmc/sd.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #include <linux/mmc/sdio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #include <linux/mmc/slot-gpio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #include "cqhci.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37)
/* Number of buffer descriptors (BDs) allocated per host for DMA transfers. */
#define MAX_BD_NUM 1024

/*--------------------------------------------------------------------------*/
/* Common Definition */
/*--------------------------------------------------------------------------*/
/* Bus-width encodings — presumably written into SDC_CFG_BUSWIDTH; confirm. */
#define MSDC_BUS_1BITS 0x0
#define MSDC_BUS_4BITS 0x1
#define MSDC_BUS_8BITS 0x2

/* 64-byte DMA burst code — presumably for MSDC_DMA_CTRL_BRUSTSZ; confirm. */
#define MSDC_BURST_64B 0x6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48)
/*--------------------------------------------------------------------------*/
/* Register Offset */
/*--------------------------------------------------------------------------*/
/* Byte offsets from the host register base (msdc_host::base). */
#define MSDC_CFG 0x0
#define MSDC_IOCON 0x04
#define MSDC_PS 0x08
#define MSDC_INT 0x0c
#define MSDC_INTEN 0x10
#define MSDC_FIFOCS 0x14
#define SDC_CFG 0x30
#define SDC_CMD 0x34
#define SDC_ARG 0x38
#define SDC_STS 0x3c
#define SDC_RESP0 0x40
#define SDC_RESP1 0x44
#define SDC_RESP2 0x48
#define SDC_RESP3 0x4c
#define SDC_BLK_NUM 0x50
#define SDC_ADV_CFG0 0x64
#define EMMC_IOCON 0x7c
#define SDC_ACMD_RESP 0x80
#define DMA_SA_H4BIT 0x8c
#define MSDC_DMA_SA 0x90
#define MSDC_DMA_CTRL 0x98
#define MSDC_DMA_CFG 0x9c
#define MSDC_PATCH_BIT 0xb0
#define MSDC_PATCH_BIT1 0xb4
#define MSDC_PATCH_BIT2 0xb8
#define MSDC_PAD_TUNE 0xec
#define MSDC_PAD_TUNE0 0xf0
#define PAD_DS_TUNE 0x188
#define PAD_CMD_TUNE 0x18c
#define EMMC50_CFG0 0x208
#define EMMC50_CFG3 0x220
#define SDC_FIFO_CFG 0x228
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84)
/*--------------------------------------------------------------------------*/
/* Top Pad Register Offset */
/*--------------------------------------------------------------------------*/
/* Byte offsets into the separate "top" pad block (msdc_host::top_base). */
#define EMMC_TOP_CONTROL 0x00
#define EMMC_TOP_CMD 0x04
#define EMMC50_PAD_DS_TUNE 0x0c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91)
/*--------------------------------------------------------------------------*/
/* Register Mask */
/*--------------------------------------------------------------------------*/

/* MSDC_CFG mask */
#define MSDC_CFG_MODE (0x1 << 0) /* RW */
#define MSDC_CFG_CKPDN (0x1 << 1) /* RW */
#define MSDC_CFG_RST (0x1 << 2) /* RW */
#define MSDC_CFG_PIO (0x1 << 3) /* RW */
#define MSDC_CFG_CKDRVEN (0x1 << 4) /* RW */
#define MSDC_CFG_BV18SDT (0x1 << 5) /* RW */
#define MSDC_CFG_BV18PSS (0x1 << 6) /* R */
#define MSDC_CFG_CKSTB (0x1 << 7) /* R */
/*
 * NOTE(review): CKDIV/CKMOD and the *_EXTRA variants overlap (both divider
 * fields start at bit 8); presumably one set is selected per SoC via
 * mtk_mmc_compatible::clk_div_bits (8 vs 12) — confirm against the clock
 * setup code.
 */
#define MSDC_CFG_CKDIV (0xff << 8) /* RW */
#define MSDC_CFG_CKMOD (0x3 << 16) /* RW */
#define MSDC_CFG_HS400_CK_MODE (0x1 << 18) /* RW */
#define MSDC_CFG_HS400_CK_MODE_EXTRA (0x1 << 22) /* RW */
#define MSDC_CFG_CKDIV_EXTRA (0xfff << 8) /* RW */
#define MSDC_CFG_CKMOD_EXTRA (0x3 << 20) /* RW */

/* MSDC_IOCON mask (sample-edge / delay-line selects for cmd and data pads) */
#define MSDC_IOCON_SDR104CKS (0x1 << 0) /* RW */
#define MSDC_IOCON_RSPL (0x1 << 1) /* RW */
#define MSDC_IOCON_DSPL (0x1 << 2) /* RW */
#define MSDC_IOCON_DDLSEL (0x1 << 3) /* RW */
#define MSDC_IOCON_DDR50CKD (0x1 << 4) /* RW */
#define MSDC_IOCON_DSPLSEL (0x1 << 5) /* RW */
#define MSDC_IOCON_W_DSPL (0x1 << 8) /* RW */
#define MSDC_IOCON_D0SPL (0x1 << 16) /* RW */
#define MSDC_IOCON_D1SPL (0x1 << 17) /* RW */
#define MSDC_IOCON_D2SPL (0x1 << 18) /* RW */
#define MSDC_IOCON_D3SPL (0x1 << 19) /* RW */
#define MSDC_IOCON_D4SPL (0x1 << 20) /* RW */
#define MSDC_IOCON_D5SPL (0x1 << 21) /* RW */
#define MSDC_IOCON_D6SPL (0x1 << 22) /* RW */
#define MSDC_IOCON_D7SPL (0x1 << 23) /* RW */
#define MSDC_IOCON_RISCSZ (0x3 << 24) /* RW */

/* MSDC_PS mask (pin state: card detect, DAT/CMD line levels, write protect) */
#define MSDC_PS_CDEN (0x1 << 0) /* RW */
#define MSDC_PS_CDSTS (0x1 << 1) /* R */
#define MSDC_PS_CDDEBOUNCE (0xf << 12) /* RW */
#define MSDC_PS_DAT (0xff << 16) /* R */
#define MSDC_PS_DATA1 (0x1 << 17) /* R */
#define MSDC_PS_CMD (0x1 << 24) /* R */
#define MSDC_PS_WP (0x1 << 31) /* R */

/* MSDC_INT mask (interrupt status; W1C = write 1 to clear) */
#define MSDC_INT_MMCIRQ (0x1 << 0) /* W1C */
#define MSDC_INT_CDSC (0x1 << 1) /* W1C */
#define MSDC_INT_ACMDRDY (0x1 << 3) /* W1C */
#define MSDC_INT_ACMDTMO (0x1 << 4) /* W1C */
#define MSDC_INT_ACMDCRCERR (0x1 << 5) /* W1C */
#define MSDC_INT_DMAQ_EMPTY (0x1 << 6) /* W1C */
#define MSDC_INT_SDIOIRQ (0x1 << 7) /* W1C */
#define MSDC_INT_CMDRDY (0x1 << 8) /* W1C */
#define MSDC_INT_CMDTMO (0x1 << 9) /* W1C */
#define MSDC_INT_RSPCRCERR (0x1 << 10) /* W1C */
#define MSDC_INT_CSTA (0x1 << 11) /* R */
#define MSDC_INT_XFER_COMPL (0x1 << 12) /* W1C */
#define MSDC_INT_DXFER_DONE (0x1 << 13) /* W1C */
#define MSDC_INT_DATTMO (0x1 << 14) /* W1C */
#define MSDC_INT_DATCRCERR (0x1 << 15) /* W1C */
#define MSDC_INT_ACMD19_DONE (0x1 << 16) /* W1C */
#define MSDC_INT_DMA_BDCSERR (0x1 << 17) /* W1C */
#define MSDC_INT_DMA_GPDCSERR (0x1 << 18) /* W1C */
#define MSDC_INT_DMA_PROTECT (0x1 << 19) /* W1C */
#define MSDC_INT_CMDQ (0x1 << 28) /* W1C */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160)
/* MSDC_INTEN mask — enable bits mirror the MSDC_INT status bit positions */
#define MSDC_INTEN_MMCIRQ (0x1 << 0) /* RW */
#define MSDC_INTEN_CDSC (0x1 << 1) /* RW */
#define MSDC_INTEN_ACMDRDY (0x1 << 3) /* RW */
#define MSDC_INTEN_ACMDTMO (0x1 << 4) /* RW */
#define MSDC_INTEN_ACMDCRCERR (0x1 << 5) /* RW */
#define MSDC_INTEN_DMAQ_EMPTY (0x1 << 6) /* RW */
#define MSDC_INTEN_SDIOIRQ (0x1 << 7) /* RW */
#define MSDC_INTEN_CMDRDY (0x1 << 8) /* RW */
#define MSDC_INTEN_CMDTMO (0x1 << 9) /* RW */
#define MSDC_INTEN_RSPCRCERR (0x1 << 10) /* RW */
#define MSDC_INTEN_CSTA (0x1 << 11) /* RW */
#define MSDC_INTEN_XFER_COMPL (0x1 << 12) /* RW */
#define MSDC_INTEN_DXFER_DONE (0x1 << 13) /* RW */
#define MSDC_INTEN_DATTMO (0x1 << 14) /* RW */
#define MSDC_INTEN_DATCRCERR (0x1 << 15) /* RW */
#define MSDC_INTEN_ACMD19_DONE (0x1 << 16) /* RW */
#define MSDC_INTEN_DMA_BDCSERR (0x1 << 17) /* RW */
#define MSDC_INTEN_DMA_GPDCSERR (0x1 << 18) /* RW */
#define MSDC_INTEN_DMA_PROTECT (0x1 << 19) /* RW */

/* MSDC_FIFOCS mask (FIFO control/status: RX/TX byte counts, FIFO clear) */
#define MSDC_FIFOCS_RXCNT (0xff << 0) /* R */
#define MSDC_FIFOCS_TXCNT (0xff << 16) /* R */
#define MSDC_FIFOCS_CLR (0x1 << 31) /* RW */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186)
/* SDC_CFG mask */
#define SDC_CFG_SDIOINTWKUP (0x1 << 0) /* RW */
#define SDC_CFG_INSWKUP (0x1 << 1) /* RW */
#define SDC_CFG_WRDTOC (0x1fff << 2) /* RW */
#define SDC_CFG_BUSWIDTH (0x3 << 16) /* RW */
#define SDC_CFG_SDIO (0x1 << 19) /* RW */
#define SDC_CFG_SDIOIDE (0x1 << 20) /* RW */
#define SDC_CFG_INTATGAP (0x1 << 21) /* RW */
#define SDC_CFG_DTOC (0xff << 24) /* RW */

/* SDC_STS mask */
#define SDC_STS_SDCBUSY (0x1 << 0) /* RW */
#define SDC_STS_CMDBUSY (0x1 << 1) /* RW */
#define SDC_STS_SWR_COMPL (0x1 << 31) /* RW */

/* NOTE(review): defined above the section header but presumably also a
 * SDC_ADV_CFG0 bit (bit 19) — confirm against the datasheet. */
#define SDC_DAT1_IRQ_TRIGGER (0x1 << 19) /* RW */
/* SDC_ADV_CFG0 mask */
#define SDC_RX_ENHANCE_EN (0x1 << 20) /* RW */

/* DMA_SA_H4BIT mask — high 4 address bits for >32-bit DMA (see support_64g) */
#define DMA_ADDR_HIGH_4BIT (0xf << 0) /* RW */

/* MSDC_DMA_CTRL mask */
#define MSDC_DMA_CTRL_START (0x1 << 0) /* W */
#define MSDC_DMA_CTRL_STOP (0x1 << 1) /* W */
#define MSDC_DMA_CTRL_RESUME (0x1 << 2) /* W */
#define MSDC_DMA_CTRL_MODE (0x1 << 8) /* RW */
#define MSDC_DMA_CTRL_LASTBUF (0x1 << 10) /* RW */
/* "BRUSTSZ" is a long-standing typo of BURSTSZ; kept for compatibility. */
#define MSDC_DMA_CTRL_BRUSTSZ (0x7 << 12) /* RW */

/* MSDC_DMA_CFG mask */
#define MSDC_DMA_CFG_STS (0x1 << 0) /* R */
#define MSDC_DMA_CFG_DECSEN (0x1 << 1) /* RW */
#define MSDC_DMA_CFG_AHBHPROT2 (0x2 << 8) /* RW */
#define MSDC_DMA_CFG_ACTIVEEN (0x2 << 12) /* RW */
#define MSDC_DMA_CFG_CS12B16B (0x1 << 16) /* RW */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223)
/* MSDC_PATCH_BIT mask */
#define MSDC_PATCH_BIT_ODDSUPP (0x1 << 1) /* RW */
#define MSDC_INT_DAT_LATCH_CK_SEL (0x7 << 7)
#define MSDC_CKGEN_MSDC_DLY_SEL (0x1f << 10)
#define MSDC_PATCH_BIT_IODSSEL (0x1 << 16) /* RW */
#define MSDC_PATCH_BIT_IOINTSEL (0x1 << 17) /* RW */
#define MSDC_PATCH_BIT_BUSYDLY (0xf << 18) /* RW */
#define MSDC_PATCH_BIT_WDOD (0xf << 22) /* RW */
#define MSDC_PATCH_BIT_IDRTSEL (0x1 << 26) /* RW */
#define MSDC_PATCH_BIT_CMDFSEL (0x1 << 27) /* RW */
#define MSDC_PATCH_BIT_INTDLSEL (0x1 << 28) /* RW */
#define MSDC_PATCH_BIT_SPCPUSH (0x1 << 29) /* RW */
#define MSDC_PATCH_BIT_DECRCTMO (0x1 << 30) /* RW */

/* MSDC_PATCH_BIT1 mask */
#define MSDC_PATCH_BIT1_CMDTA (0x7 << 3) /* RW */
#define MSDC_PB1_BUSY_CHECK_SEL (0x1 << 7) /* RW */
#define MSDC_PATCH_BIT1_STOP_DLY (0xf << 8) /* RW */

/* MSDC_PATCH_BIT2 mask */
#define MSDC_PATCH_BIT2_CFGRESP (0x1 << 15) /* RW */
#define MSDC_PATCH_BIT2_CFGCRCSTS (0x1 << 28) /* RW */
#define MSDC_PB2_SUPPORT_64G (0x1 << 1) /* RW */
#define MSDC_PB2_RESPWAIT (0x3 << 2) /* RW */
#define MSDC_PB2_RESPSTSENSEL (0x7 << 16) /* RW */
#define MSDC_PB2_CRCSTSENSEL (0x7 << 29) /* RW */

/* MSDC_PAD_TUNE / MSDC_PAD_TUNE0 mask — 5-bit pad delay cells (0..31) */
#define MSDC_PAD_TUNE_DATWRDLY (0x1f << 0) /* RW */
#define MSDC_PAD_TUNE_DATRRDLY (0x1f << 8) /* RW */
#define MSDC_PAD_TUNE_CMDRDLY (0x1f << 16) /* RW */
#define MSDC_PAD_TUNE_CMDRRDLY (0x1f << 22) /* RW */
#define MSDC_PAD_TUNE_CLKTDLY (0x1f << 27) /* RW */
#define MSDC_PAD_TUNE_RXDLYSEL (0x1 << 15) /* RW */
#define MSDC_PAD_TUNE_RD_SEL (0x1 << 13) /* RW */
#define MSDC_PAD_TUNE_CMD_SEL (0x1 << 21) /* RW */

/* PAD_DS_TUNE mask (HS400 data-strobe delay cells) */
#define PAD_DS_TUNE_DLY1 (0x1f << 2) /* RW */
#define PAD_DS_TUNE_DLY2 (0x1f << 7) /* RW */
#define PAD_DS_TUNE_DLY3 (0x1f << 12) /* RW */

/* PAD_CMD_TUNE mask */
#define PAD_CMD_TUNE_RX_DLY3 (0x1f << 1) /* RW */

/* EMMC50_CFG0 mask */
#define EMMC50_CFG_PADCMD_LATCHCK (0x1 << 0) /* RW */
#define EMMC50_CFG_CRCSTS_EDGE (0x1 << 3) /* RW */
#define EMMC50_CFG_CFCSTS_SEL (0x1 << 4) /* RW */

/* EMMC50_CFG3 mask */
#define EMMC50_CFG3_OUTS_WR (0x1f << 0) /* RW */

/* SDC_FIFO_CFG mask */
#define SDC_FIFO_CFG_WRVALIDSEL (0x1 << 24) /* RW */
#define SDC_FIFO_CFG_RDVALIDSEL (0x1 << 25) /* RW */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272)
/* EMMC_TOP_CONTROL mask */
#define PAD_RXDLY_SEL (0x1 << 0) /* RW */
#define DELAY_EN (0x1 << 1) /* RW */
#define PAD_DAT_RD_RXDLY2 (0x1f << 2) /* RW */
#define PAD_DAT_RD_RXDLY (0x1f << 7) /* RW */
#define PAD_DAT_RD_RXDLY2_SEL (0x1 << 12) /* RW */
#define PAD_DAT_RD_RXDLY_SEL (0x1 << 13) /* RW */
#define DATA_K_VALUE_SEL (0x1 << 14) /* RW */
#define SDC_RX_ENH_EN (0x1 << 15) /* RW — source said "TW", likely a typo; confirm vs datasheet */

/* EMMC_TOP_CMD mask */
#define PAD_CMD_RXDLY2 (0x1f << 0) /* RW */
#define PAD_CMD_RXDLY (0x1f << 5) /* RW */
#define PAD_CMD_RD_RXDLY2_SEL (0x1 << 10) /* RW */
#define PAD_CMD_RD_RXDLY_SEL (0x1 << 11) /* RW */
#define PAD_CMD_TX_DLY (0x1f << 12) /* RW */

/* Per-request error/status flags — presumably OR'd into msdc_host::error. */
#define REQ_CMD_EIO (0x1 << 0)
#define REQ_CMD_TMO (0x1 << 1)
#define REQ_DAT_ERR (0x1 << 2)
#define REQ_STOP_EIO (0x1 << 3)
#define REQ_STOP_TMO (0x1 << 4)
#define REQ_CMD_BUSY (0x1 << 5)

/* Data-buffer preparation state flags — verify use against mmc_data cookie. */
#define MSDC_PREPARE_FLAG (0x1 << 0)
#define MSDC_ASYNC_FLAG (0x1 << 1)
#define MSDC_MMAP_FLAG (0x1 << 2)

#define MTK_MMC_AUTOSUSPEND_DELAY 50	/* runtime-PM autosuspend delay, ms */
#define CMD_TIMEOUT (HZ/10 * 5) /* 100ms x5 */
#define DAT_TIMEOUT (HZ * 5) /* 1000ms x5 */

#define DEFAULT_DEBOUNCE (8) /* 8 cycles CD debounce */

/* Matches the 5-bit (0x1f) delay fields above: 32 selectable delay cells. */
#define PAD_DELAY_MAX 32 /* PAD delay cells */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) /*--------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) /* Descriptor Structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) /*--------------------------------------------------------------------------*/
/*
 * General Purpose Descriptor (GPD) for the MSDC DMA engine.
 *
 * NOTE(review): the gpd array's physical address is what msdc_dma::gpd_addr
 * holds, so this layout is presumably hardware ABI — do not reorder or
 * resize fields. Field meanings below are inferred from names; confirm
 * against the MSDC datasheet.
 */
struct mt_gpdma_desc {
	u32 gpd_info;		/* control word; bit fields below */
#define GPDMA_DESC_HWO (0x1 << 0)		/* hardware-owned — TODO confirm */
#define GPDMA_DESC_BDP (0x1 << 1)		/* BD list present — TODO confirm */
#define GPDMA_DESC_CHECKSUM (0xff << 8) /* bit8 ~ bit15 */
#define GPDMA_DESC_INT (0x1 << 16)		/* raise interrupt on completion */
#define GPDMA_DESC_NEXT_H4 (0xf << 24)	/* bits 35:32 of 'next' (64G support) */
#define GPDMA_DESC_PTR_H4 (0xf << 28)	/* bits 35:32 of 'ptr' (64G support) */
	u32 next;		/* bus address of next GPD, low 32 bits */
	u32 ptr;		/* bus address of BD list / buffer, low 32 bits */
	u32 gpd_data_len;	/* length word; bit fields below */
#define GPDMA_DESC_BUFLEN (0xffff) /* bit0 ~ bit15 */
#define GPDMA_DESC_EXTLEN (0xff << 16) /* bit16 ~ bit23 */
	u32 arg;
	u32 blknum;
	u32 cmd;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328)
/*
 * Buffer Descriptor (BD) for the MSDC DMA engine; msdc_dma::bd_addr holds
 * the array's physical address, so treat the layout as hardware ABI.
 * Field meanings inferred from names — confirm against the datasheet.
 */
struct mt_bdma_desc {
	u32 bd_info;		/* control word; bit fields below */
#define BDMA_DESC_EOL (0x1 << 0)		/* end-of-list marker */
#define BDMA_DESC_CHECKSUM (0xff << 8) /* bit8 ~ bit15 */
#define BDMA_DESC_BLKPAD (0x1 << 17)
#define BDMA_DESC_DWPAD (0x1 << 18)
#define BDMA_DESC_NEXT_H4 (0xf << 24)	/* bits 35:32 of 'next' (64G support) */
#define BDMA_DESC_PTR_H4 (0xf << 28)	/* bits 35:32 of 'ptr' (64G support) */
	u32 next;		/* bus address of next BD, low 32 bits */
	u32 ptr;		/* bus address of data buffer, low 32 bits */
	u32 bd_data_len;	/* buffer length; mask depends on SoC, see below */
#define BDMA_DESC_BUFLEN (0xffff) /* bit0 ~ bit15 */
#define BDMA_DESC_BUFLEN_EXT (0xffffff) /* bit0 ~ bit23 */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343)
/* Per-host DMA bookkeeping: descriptor arrays and their bus addresses. */
struct msdc_dma {
	struct scatterlist *sg;	/* I/O scatter list */
	struct mt_gpdma_desc *gpd;		/* pointer to gpd array */
	struct mt_bdma_desc *bd;		/* pointer to bd array */
	dma_addr_t gpd_addr;	/* the physical address of gpd array */
	dma_addr_t bd_addr;	/* the physical address of bd array */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351)
/*
 * Shadow copy of controller registers (names match the register-offset
 * macros above) — presumably saved/restored around suspend/resume or
 * runtime PM when register contents are lost; confirm against the
 * save/restore helpers.
 */
struct msdc_save_para {
	u32 msdc_cfg;
	u32 iocon;
	u32 sdc_cfg;
	u32 pad_tune;
	u32 patch_bit0;
	u32 patch_bit1;
	u32 patch_bit2;
	u32 pad_ds_tune;
	u32 pad_cmd_tune;
	u32 emmc50_cfg0;
	u32 emmc50_cfg3;
	u32 sdc_fifo_cfg;
	u32 emmc_top_control;
	u32 emmc_top_cmd;
	u32 emmc50_pad_ds_tune;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369)
/*
 * Per-SoC capability/quirk table, selected by DT compatible string
 * (dev_comp in struct msdc_host).
 */
struct mtk_mmc_compatible {
	u8 clk_div_bits;	/* width of clk divider: 8 (CKDIV) or 12 (CKDIV_EXTRA) — confirm */
	bool recheck_sdio_irq;	/* re-poll SDIO IRQ after handling — confirm semantics */
	bool hs400_tune; /* only used for MT8173 */
	u32 pad_tune_reg;	/* MSDC_PAD_TUNE vs MSDC_PAD_TUNE0, per SoC — confirm */
	bool async_fifo;
	bool data_tune;
	bool busy_check;
	bool stop_clk_fix;
	bool enhance_rx;
	bool support_64g;	/* >32-bit DMA addressing (H4 fields / DMA_SA_H4BIT) */
	bool use_internal_cd;	/* use controller card-detect instead of GPIO — confirm */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383)
/*
 * Snapshot of the tuning-related registers (names match the register
 * offsets above) — presumably one copy kept per speed mode so results can
 * be re-applied without re-tuning; confirm against the tuning code.
 */
struct msdc_tune_para {
	u32 iocon;
	u32 pad_tune;
	u32 pad_cmd_tune;
	u32 emmc_top_control;
	u32 emmc_top_cmd;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391)
/*
 * Result of scanning the 32 pad-delay cells (PAD_DELAY_MAX): the longest
 * contiguous window of passing delays and the value chosen from it.
 * Interpretation inferred from names — confirm against the scan helper.
 */
struct msdc_delay_phase {
	u8 maxlen;	/* length of the longest passing window */
	u8 start;	/* first delay cell of that window */
	u8 final_phase;	/* delay value selected for use */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) struct msdc_host {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) const struct mtk_mmc_compatible *dev_comp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) int cmd_rsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) spinlock_t lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) struct mmc_request *mrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) struct mmc_command *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) struct mmc_data *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) void __iomem *base; /* host base address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) void __iomem *top_base; /* host top register base address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) struct msdc_dma dma; /* dma channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) u64 dma_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) u32 timeout_ns; /* data timeout ns */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) u32 timeout_clks; /* data timeout clks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) struct pinctrl *pinctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) struct pinctrl_state *pins_default;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) struct pinctrl_state *pins_uhs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) struct delayed_work req_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) int irq; /* host interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) struct reset_control *reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) struct clk *src_clk; /* msdc source clock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) struct clk *h_clk; /* msdc h_clk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) struct clk *bus_clk; /* bus clock which used to access register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) struct clk *src_clk_cg; /* msdc source clock control gate */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) u32 mclk; /* mmc subsystem clock frequency */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) u32 src_clk_freq; /* source clock frequency */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) unsigned char timing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) bool vqmmc_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) u32 latch_ck;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) u32 hs400_ds_delay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) u32 hs200_cmd_int_delay; /* cmd internal delay for HS200/SDR104 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) u32 hs400_cmd_int_delay; /* cmd internal delay for HS400 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) bool hs400_cmd_resp_sel_rising;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) /* cmd response sample selection for HS400 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) bool hs400_mode; /* current eMMC will run at hs400 mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) bool internal_cd; /* Use internal card-detect logic */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) bool cqhci; /* support eMMC hw cmdq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) struct msdc_save_para save_para; /* used when gate HCLK */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) struct msdc_tune_para def_tune_para; /* default tune setting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) struct msdc_tune_para saved_tune_para; /* tune result of CMD21/CMD19 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) struct cqhci_host *cq_host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) static const struct mtk_mmc_compatible mt8135_compat = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) .clk_div_bits = 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) .recheck_sdio_irq = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) .hs400_tune = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) .pad_tune_reg = MSDC_PAD_TUNE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) .async_fifo = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) .data_tune = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) .busy_check = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) .stop_clk_fix = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) .enhance_rx = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) .support_64g = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) static const struct mtk_mmc_compatible mt8173_compat = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) .clk_div_bits = 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) .recheck_sdio_irq = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) .hs400_tune = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) .pad_tune_reg = MSDC_PAD_TUNE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) .async_fifo = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) .data_tune = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) .busy_check = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) .stop_clk_fix = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) .enhance_rx = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) .support_64g = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) static const struct mtk_mmc_compatible mt8183_compat = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) .clk_div_bits = 12,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) .recheck_sdio_irq = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) .hs400_tune = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) .pad_tune_reg = MSDC_PAD_TUNE0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) .async_fifo = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) .data_tune = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) .busy_check = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) .stop_clk_fix = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) .enhance_rx = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) .support_64g = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) static const struct mtk_mmc_compatible mt2701_compat = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) .clk_div_bits = 12,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) .recheck_sdio_irq = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) .hs400_tune = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) .pad_tune_reg = MSDC_PAD_TUNE0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) .async_fifo = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) .data_tune = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) .busy_check = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) .stop_clk_fix = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) .enhance_rx = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) .support_64g = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) static const struct mtk_mmc_compatible mt2712_compat = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) .clk_div_bits = 12,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) .recheck_sdio_irq = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) .hs400_tune = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) .pad_tune_reg = MSDC_PAD_TUNE0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) .async_fifo = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) .data_tune = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) .busy_check = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) .stop_clk_fix = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) .enhance_rx = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) .support_64g = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) static const struct mtk_mmc_compatible mt7622_compat = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) .clk_div_bits = 12,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) .recheck_sdio_irq = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) .hs400_tune = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) .pad_tune_reg = MSDC_PAD_TUNE0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) .async_fifo = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) .data_tune = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) .busy_check = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) .stop_clk_fix = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) .enhance_rx = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) .support_64g = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) static const struct mtk_mmc_compatible mt8516_compat = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) .clk_div_bits = 12,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) .recheck_sdio_irq = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) .hs400_tune = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) .pad_tune_reg = MSDC_PAD_TUNE0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) .async_fifo = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) .data_tune = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) .busy_check = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) .stop_clk_fix = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) static const struct mtk_mmc_compatible mt7620_compat = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) .clk_div_bits = 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) .recheck_sdio_irq = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) .hs400_tune = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) .pad_tune_reg = MSDC_PAD_TUNE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) .async_fifo = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) .data_tune = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) .busy_check = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) .stop_clk_fix = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) .enhance_rx = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) .use_internal_cd = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) static const struct mtk_mmc_compatible mt6779_compat = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) .clk_div_bits = 12,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) .recheck_sdio_irq = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) .hs400_tune = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) .pad_tune_reg = MSDC_PAD_TUNE0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) .async_fifo = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) .data_tune = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) .busy_check = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) .stop_clk_fix = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) .enhance_rx = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) .support_64g = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) static const struct of_device_id msdc_of_ids[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) { .compatible = "mediatek,mt8135-mmc", .data = &mt8135_compat},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) { .compatible = "mediatek,mt8173-mmc", .data = &mt8173_compat},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) { .compatible = "mediatek,mt8183-mmc", .data = &mt8183_compat},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) { .compatible = "mediatek,mt2701-mmc", .data = &mt2701_compat},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) { .compatible = "mediatek,mt2712-mmc", .data = &mt2712_compat},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) { .compatible = "mediatek,mt7622-mmc", .data = &mt7622_compat},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) { .compatible = "mediatek,mt8516-mmc", .data = &mt8516_compat},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) { .compatible = "mediatek,mt7620-mmc", .data = &mt7620_compat},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) { .compatible = "mediatek,mt6779-mmc", .data = &mt6779_compat},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) MODULE_DEVICE_TABLE(of, msdc_of_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) static void sdr_set_bits(void __iomem *reg, u32 bs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) u32 val = readl(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) val |= bs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) writel(val, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) static void sdr_clr_bits(void __iomem *reg, u32 bs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) u32 val = readl(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) val &= ~bs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) writel(val, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) static void sdr_set_field(void __iomem *reg, u32 field, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) unsigned int tv = readl(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) tv &= ~field;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) tv |= ((val) << (ffs((unsigned int)field) - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) writel(tv, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) static void sdr_get_field(void __iomem *reg, u32 field, u32 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) unsigned int tv = readl(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) *val = ((tv & field) >> (ffs((unsigned int)field) - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) static void msdc_reset_hw(struct msdc_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_RST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) while (readl(host->base + MSDC_CFG) & MSDC_CFG_RST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) sdr_set_bits(host->base + MSDC_FIFOCS, MSDC_FIFOCS_CLR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) while (readl(host->base + MSDC_FIFOCS) & MSDC_FIFOCS_CLR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) val = readl(host->base + MSDC_INT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) writel(val, host->base + MSDC_INT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) static void msdc_cmd_next(struct msdc_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) struct mmc_request *mrq, struct mmc_command *cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) static void __msdc_enable_sdio_irq(struct msdc_host *host, int enb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) static const u32 cmd_ints_mask = MSDC_INTEN_CMDRDY | MSDC_INTEN_RSPCRCERR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) MSDC_INTEN_CMDTMO | MSDC_INTEN_ACMDRDY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) MSDC_INTEN_ACMDCRCERR | MSDC_INTEN_ACMDTMO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) static const u32 data_ints_mask = MSDC_INTEN_XFER_COMPL | MSDC_INTEN_DATTMO |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) MSDC_INTEN_DATCRCERR | MSDC_INTEN_DMA_BDCSERR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) MSDC_INTEN_DMA_GPDCSERR | MSDC_INTEN_DMA_PROTECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) static u8 msdc_dma_calcs(u8 *buf, u32 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) u32 i, sum = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) for (i = 0; i < len; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) sum += buf[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) return 0xff - (u8) sum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) static inline void msdc_dma_setup(struct msdc_host *host, struct msdc_dma *dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) struct mmc_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) unsigned int j, dma_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) dma_addr_t dma_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) u32 dma_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) struct mt_gpdma_desc *gpd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) struct mt_bdma_desc *bd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) sg = data->sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) gpd = dma->gpd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) bd = dma->bd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) /* modify gpd */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) gpd->gpd_info |= GPDMA_DESC_HWO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) gpd->gpd_info |= GPDMA_DESC_BDP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) /* need to clear first. use these bits to calc checksum */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) gpd->gpd_info &= ~GPDMA_DESC_CHECKSUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) gpd->gpd_info |= msdc_dma_calcs((u8 *) gpd, 16) << 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) /* modify bd */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) for_each_sg(data->sg, sg, data->sg_count, j) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) dma_address = sg_dma_address(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) dma_len = sg_dma_len(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) /* init bd */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) bd[j].bd_info &= ~BDMA_DESC_BLKPAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) bd[j].bd_info &= ~BDMA_DESC_DWPAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) bd[j].ptr = lower_32_bits(dma_address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) if (host->dev_comp->support_64g) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) bd[j].bd_info &= ~BDMA_DESC_PTR_H4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) bd[j].bd_info |= (upper_32_bits(dma_address) & 0xf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) << 28;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) if (host->dev_comp->support_64g) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) bd[j].bd_data_len &= ~BDMA_DESC_BUFLEN_EXT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) bd[j].bd_data_len |= (dma_len & BDMA_DESC_BUFLEN_EXT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) bd[j].bd_data_len &= ~BDMA_DESC_BUFLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) bd[j].bd_data_len |= (dma_len & BDMA_DESC_BUFLEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) if (j == data->sg_count - 1) /* the last bd */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) bd[j].bd_info |= BDMA_DESC_EOL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) bd[j].bd_info &= ~BDMA_DESC_EOL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) /* checksume need to clear first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) bd[j].bd_info &= ~BDMA_DESC_CHECKSUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) bd[j].bd_info |= msdc_dma_calcs((u8 *)(&bd[j]), 16) << 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) sdr_set_field(host->base + MSDC_DMA_CFG, MSDC_DMA_CFG_DECSEN, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) dma_ctrl = readl_relaxed(host->base + MSDC_DMA_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) dma_ctrl &= ~(MSDC_DMA_CTRL_BRUSTSZ | MSDC_DMA_CTRL_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) dma_ctrl |= (MSDC_BURST_64B << 12 | 1 << 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) writel_relaxed(dma_ctrl, host->base + MSDC_DMA_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) if (host->dev_comp->support_64g)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) sdr_set_field(host->base + DMA_SA_H4BIT, DMA_ADDR_HIGH_4BIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) upper_32_bits(dma->gpd_addr) & 0xf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) writel(lower_32_bits(dma->gpd_addr), host->base + MSDC_DMA_SA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) static void msdc_prepare_data(struct msdc_host *host, struct mmc_request *mrq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) struct mmc_data *data = mrq->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) if (!(data->host_cookie & MSDC_PREPARE_FLAG)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) data->host_cookie |= MSDC_PREPARE_FLAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) data->sg_count = dma_map_sg(host->dev, data->sg, data->sg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) mmc_get_dma_dir(data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) static void msdc_unprepare_data(struct msdc_host *host, struct mmc_request *mrq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) struct mmc_data *data = mrq->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) if (data->host_cookie & MSDC_ASYNC_FLAG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) if (data->host_cookie & MSDC_PREPARE_FLAG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) dma_unmap_sg(host->dev, data->sg, data->sg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) mmc_get_dma_dir(data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) data->host_cookie &= ~MSDC_PREPARE_FLAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) static u64 msdc_timeout_cal(struct msdc_host *host, u64 ns, u64 clks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) struct mmc_host *mmc = mmc_from_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) u64 timeout, clk_ns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) u32 mode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) if (mmc->actual_clock == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) clk_ns = 1000000000ULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) do_div(clk_ns, mmc->actual_clock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) timeout = ns + clk_ns - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) do_div(timeout, clk_ns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) timeout += clks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) /* in 1048576 sclk cycle unit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) timeout = DIV_ROUND_UP(timeout, (0x1 << 20));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) if (host->dev_comp->clk_div_bits == 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) sdr_get_field(host->base + MSDC_CFG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) MSDC_CFG_CKMOD, &mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) sdr_get_field(host->base + MSDC_CFG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) MSDC_CFG_CKMOD_EXTRA, &mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) /*DDR mode will double the clk cycles for data timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) timeout = mode >= 2 ? timeout * 2 : timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) timeout = timeout > 1 ? timeout - 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) return timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) /* clock control primitives */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) static void msdc_set_timeout(struct msdc_host *host, u64 ns, u64 clks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) u64 timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) host->timeout_ns = ns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) host->timeout_clks = clks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) timeout = msdc_timeout_cal(host, ns, clks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) sdr_set_field(host->base + SDC_CFG, SDC_CFG_DTOC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) (u32)(timeout > 255 ? 255 : timeout));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) static void msdc_set_busy_timeout(struct msdc_host *host, u64 ns, u64 clks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) u64 timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) timeout = msdc_timeout_cal(host, ns, clks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) sdr_set_field(host->base + SDC_CFG, SDC_CFG_WRDTOC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) (u32)(timeout > 8191 ? 8191 : timeout));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) static void msdc_gate_clock(struct msdc_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) clk_disable_unprepare(host->src_clk_cg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) clk_disable_unprepare(host->src_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) clk_disable_unprepare(host->bus_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) clk_disable_unprepare(host->h_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) static void msdc_ungate_clock(struct msdc_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) clk_prepare_enable(host->h_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) clk_prepare_enable(host->bus_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) clk_prepare_enable(host->src_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) clk_prepare_enable(host->src_clk_cg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) struct mmc_host *mmc = mmc_from_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) u32 mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) u32 flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) u32 div;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) u32 sclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) u32 tune_reg = host->dev_comp->pad_tune_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) if (!hz) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) dev_dbg(host->dev, "set mclk to 0\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) host->mclk = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) mmc->actual_clock = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) flags = readl(host->base + MSDC_INTEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) sdr_clr_bits(host->base + MSDC_INTEN, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) if (host->dev_comp->clk_div_bits == 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_HS400_CK_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) sdr_clr_bits(host->base + MSDC_CFG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) MSDC_CFG_HS400_CK_MODE_EXTRA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) if (timing == MMC_TIMING_UHS_DDR50 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) timing == MMC_TIMING_MMC_DDR52 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) timing == MMC_TIMING_MMC_HS400) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) if (timing == MMC_TIMING_MMC_HS400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) mode = 0x3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) mode = 0x2; /* ddr mode and use divisor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) if (hz >= (host->src_clk_freq >> 2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) div = 0; /* mean div = 1/4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) sclk = host->src_clk_freq >> 2; /* sclk = clk / 4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) div = (host->src_clk_freq + ((hz << 2) - 1)) / (hz << 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) sclk = (host->src_clk_freq >> 2) / div;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) div = (div >> 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) if (timing == MMC_TIMING_MMC_HS400 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) hz >= (host->src_clk_freq >> 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) if (host->dev_comp->clk_div_bits == 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) sdr_set_bits(host->base + MSDC_CFG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) MSDC_CFG_HS400_CK_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) sdr_set_bits(host->base + MSDC_CFG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) MSDC_CFG_HS400_CK_MODE_EXTRA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) sclk = host->src_clk_freq >> 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) div = 0; /* div is ignore when bit18 is set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) } else if (hz >= host->src_clk_freq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) mode = 0x1; /* no divisor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) div = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) sclk = host->src_clk_freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) mode = 0x0; /* use divisor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) if (hz >= (host->src_clk_freq >> 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) div = 0; /* mean div = 1/2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) sclk = host->src_clk_freq >> 1; /* sclk = clk / 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) div = (host->src_clk_freq + ((hz << 2) - 1)) / (hz << 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) sclk = (host->src_clk_freq >> 2) / div;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) * As src_clk/HCLK use the same bit to gate/ungate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) * So if want to only gate src_clk, need gate its parent(mux).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) if (host->src_clk_cg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) clk_disable_unprepare(host->src_clk_cg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) clk_disable_unprepare(clk_get_parent(host->src_clk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) if (host->dev_comp->clk_div_bits == 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) sdr_set_field(host->base + MSDC_CFG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) MSDC_CFG_CKMOD | MSDC_CFG_CKDIV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) (mode << 8) | div);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) sdr_set_field(host->base + MSDC_CFG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) MSDC_CFG_CKMOD_EXTRA | MSDC_CFG_CKDIV_EXTRA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) (mode << 12) | div);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) if (host->src_clk_cg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) clk_prepare_enable(host->src_clk_cg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) clk_prepare_enable(clk_get_parent(host->src_clk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) mmc->actual_clock = sclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) host->mclk = hz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) host->timing = timing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) /* need because clk changed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) msdc_set_timeout(host, host->timeout_ns, host->timeout_clks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) sdr_set_bits(host->base + MSDC_INTEN, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) * mmc_select_hs400() will drop to 50Mhz and High speed mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) * tune result of hs200/200Mhz is not suitable for 50Mhz
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) if (mmc->actual_clock <= 52000000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) writel(host->def_tune_para.iocon, host->base + MSDC_IOCON);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) if (host->top_base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) writel(host->def_tune_para.emmc_top_control,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) host->top_base + EMMC_TOP_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) writel(host->def_tune_para.emmc_top_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) host->top_base + EMMC_TOP_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) writel(host->def_tune_para.pad_tune,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) host->base + tune_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) writel(host->saved_tune_para.iocon, host->base + MSDC_IOCON);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) writel(host->saved_tune_para.pad_cmd_tune,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) host->base + PAD_CMD_TUNE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) if (host->top_base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) writel(host->saved_tune_para.emmc_top_control,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) host->top_base + EMMC_TOP_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) writel(host->saved_tune_para.emmc_top_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) host->top_base + EMMC_TOP_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) writel(host->saved_tune_para.pad_tune,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) host->base + tune_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) if (timing == MMC_TIMING_MMC_HS400 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) host->dev_comp->hs400_tune)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) sdr_set_field(host->base + tune_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) MSDC_PAD_TUNE_CMDRRDLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) host->hs400_cmd_int_delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) dev_dbg(host->dev, "sclk: %d, timing: %d\n", mmc->actual_clock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) timing);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) static inline u32 msdc_cmd_find_resp(struct msdc_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) struct mmc_request *mrq, struct mmc_command *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) u32 resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) switch (mmc_resp_type(cmd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) /* Actually, R1, R5, R6, R7 are the same */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) case MMC_RSP_R1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) resp = 0x1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) case MMC_RSP_R1B:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) resp = 0x7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) case MMC_RSP_R2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) resp = 0x2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) case MMC_RSP_R3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) resp = 0x3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) case MMC_RSP_NONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) resp = 0x0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) return resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) static inline u32 msdc_cmd_prepare_raw_cmd(struct msdc_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) struct mmc_request *mrq, struct mmc_command *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) struct mmc_host *mmc = mmc_from_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) /* rawcmd :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) * vol_swt << 30 | auto_cmd << 28 | blklen << 16 | go_irq << 15 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) * stop << 14 | rw << 13 | dtype << 11 | rsptyp << 7 | brk << 6 | opcode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) u32 opcode = cmd->opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) u32 resp = msdc_cmd_find_resp(host, mrq, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) u32 rawcmd = (opcode & 0x3f) | ((resp & 0x7) << 7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) host->cmd_rsp = resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) if ((opcode == SD_IO_RW_DIRECT && cmd->flags == (unsigned int) -1) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) opcode == MMC_STOP_TRANSMISSION)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) rawcmd |= (0x1 << 14);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) else if (opcode == SD_SWITCH_VOLTAGE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) rawcmd |= (0x1 << 30);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) else if (opcode == SD_APP_SEND_SCR ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) opcode == SD_APP_SEND_NUM_WR_BLKS ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) (opcode == SD_SWITCH && mmc_cmd_type(cmd) == MMC_CMD_ADTC) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) (opcode == SD_APP_SD_STATUS && mmc_cmd_type(cmd) == MMC_CMD_ADTC) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) (opcode == MMC_SEND_EXT_CSD && mmc_cmd_type(cmd) == MMC_CMD_ADTC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) rawcmd |= (0x1 << 11);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) if (cmd->data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) struct mmc_data *data = cmd->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) if (mmc_op_multi(opcode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) if (mmc_card_mmc(mmc->card) && mrq->sbc &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) !(mrq->sbc->arg & 0xFFFF0000))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) rawcmd |= 0x2 << 28; /* AutoCMD23 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) rawcmd |= ((data->blksz & 0xFFF) << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) if (data->flags & MMC_DATA_WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) rawcmd |= (0x1 << 13);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) if (data->blocks > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) rawcmd |= (0x2 << 11);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) rawcmd |= (0x1 << 11);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) /* Always use dma mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_PIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) if (host->timeout_ns != data->timeout_ns ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) host->timeout_clks != data->timeout_clks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) msdc_set_timeout(host, data->timeout_ns,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) data->timeout_clks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) writel(data->blocks, host->base + SDC_BLK_NUM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) return rawcmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) static void msdc_start_data(struct msdc_host *host, struct mmc_request *mrq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) struct mmc_command *cmd, struct mmc_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) bool read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) WARN_ON(host->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) host->data = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) read = data->flags & MMC_DATA_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) mod_delayed_work(system_wq, &host->req_timeout, DAT_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) msdc_dma_setup(host, &host->dma, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) sdr_set_bits(host->base + MSDC_INTEN, data_ints_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) sdr_set_field(host->base + MSDC_DMA_CTRL, MSDC_DMA_CTRL_START, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) dev_dbg(host->dev, "DMA start\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) dev_dbg(host->dev, "%s: cmd=%d DMA data: %d blocks; read=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) __func__, cmd->opcode, data->blocks, read);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) static int msdc_auto_cmd_done(struct msdc_host *host, int events,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) struct mmc_command *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) u32 *rsp = cmd->resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) rsp[0] = readl(host->base + SDC_ACMD_RESP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) if (events & MSDC_INT_ACMDRDY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) cmd->error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) msdc_reset_hw(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) if (events & MSDC_INT_ACMDCRCERR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) cmd->error = -EILSEQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) host->error |= REQ_STOP_EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) } else if (events & MSDC_INT_ACMDTMO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) cmd->error = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) host->error |= REQ_STOP_TMO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) dev_err(host->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) "%s: AUTO_CMD%d arg=%08X; rsp %08X; cmd_error=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) __func__, cmd->opcode, cmd->arg, rsp[0], cmd->error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) return cmd->error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) * msdc_recheck_sdio_irq - recheck whether the SDIO irq is lost
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) * Host controller may lost interrupt in some special case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) * Add SDIO irq recheck mechanism to make sure all interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) * can be processed immediately
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) static void msdc_recheck_sdio_irq(struct msdc_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) struct mmc_host *mmc = mmc_from_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) u32 reg_int, reg_inten, reg_ps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) if (mmc->caps & MMC_CAP_SDIO_IRQ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) reg_inten = readl(host->base + MSDC_INTEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) if (reg_inten & MSDC_INTEN_SDIOIRQ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) reg_int = readl(host->base + MSDC_INT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) reg_ps = readl(host->base + MSDC_PS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) if (!(reg_int & MSDC_INT_SDIOIRQ ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) reg_ps & MSDC_PS_DATA1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) __msdc_enable_sdio_irq(host, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) sdio_signal_irq(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) static void msdc_track_cmd_data(struct msdc_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) struct mmc_command *cmd, struct mmc_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) if (host->error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) dev_dbg(host->dev, "%s: cmd=%d arg=%08X; host->error=0x%08X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) __func__, cmd->opcode, cmd->arg, host->error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) static void msdc_request_done(struct msdc_host *host, struct mmc_request *mrq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) * No need check the return value of cancel_delayed_work, as only ONE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) * path will go here!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) cancel_delayed_work(&host->req_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) spin_lock_irqsave(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) host->mrq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) spin_unlock_irqrestore(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) msdc_track_cmd_data(host, mrq->cmd, mrq->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) if (mrq->data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) msdc_unprepare_data(host, mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) if (host->error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) msdc_reset_hw(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) mmc_request_done(mmc_from_priv(host), mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) if (host->dev_comp->recheck_sdio_irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) msdc_recheck_sdio_irq(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) /* returns true if command is fully handled; returns false otherwise */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) static bool msdc_cmd_done(struct msdc_host *host, int events,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) struct mmc_request *mrq, struct mmc_command *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) bool done = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) bool sbc_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) u32 *rsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) if (mrq->sbc && cmd == mrq->cmd &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) (events & (MSDC_INT_ACMDRDY | MSDC_INT_ACMDCRCERR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) | MSDC_INT_ACMDTMO)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) msdc_auto_cmd_done(host, events, mrq->sbc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) sbc_error = mrq->sbc && mrq->sbc->error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) if (!sbc_error && !(events & (MSDC_INT_CMDRDY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) | MSDC_INT_RSPCRCERR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) | MSDC_INT_CMDTMO)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) return done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) spin_lock_irqsave(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) done = !host->cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) host->cmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) spin_unlock_irqrestore(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) if (done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) rsp = cmd->resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) sdr_clr_bits(host->base + MSDC_INTEN, cmd_ints_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) if (cmd->flags & MMC_RSP_PRESENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) if (cmd->flags & MMC_RSP_136) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) rsp[0] = readl(host->base + SDC_RESP3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) rsp[1] = readl(host->base + SDC_RESP2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) rsp[2] = readl(host->base + SDC_RESP1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) rsp[3] = readl(host->base + SDC_RESP0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) rsp[0] = readl(host->base + SDC_RESP0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) if (!sbc_error && !(events & MSDC_INT_CMDRDY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) if (events & MSDC_INT_CMDTMO ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) (cmd->opcode != MMC_SEND_TUNING_BLOCK &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) * should not clear fifo/interrupt as the tune data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) * may have alreay come when cmd19/cmd21 gets response
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) * CRC error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) msdc_reset_hw(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) if (events & MSDC_INT_RSPCRCERR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) cmd->error = -EILSEQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) host->error |= REQ_CMD_EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) } else if (events & MSDC_INT_CMDTMO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) cmd->error = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) host->error |= REQ_CMD_TMO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) if (cmd->error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) dev_dbg(host->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) "%s: cmd=%d arg=%08X; rsp %08X; cmd_error=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) __func__, cmd->opcode, cmd->arg, rsp[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) cmd->error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) msdc_cmd_next(host, mrq, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) /* It is the core layer's responsibility to ensure card status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) * is correct before issue a request. but host design do below
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) * checks recommended.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) static inline bool msdc_cmd_is_ready(struct msdc_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) struct mmc_request *mrq, struct mmc_command *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) /* The max busy time we can endure is 20ms */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) unsigned long tmo = jiffies + msecs_to_jiffies(20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) while ((readl(host->base + SDC_STS) & SDC_STS_CMDBUSY) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) time_before(jiffies, tmo))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) if (readl(host->base + SDC_STS) & SDC_STS_CMDBUSY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) dev_err(host->dev, "CMD bus busy detected\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) host->error |= REQ_CMD_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) msdc_cmd_done(host, MSDC_INT_CMDTMO, mrq, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) if (mmc_resp_type(cmd) == MMC_RSP_R1B || cmd->data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) tmo = jiffies + msecs_to_jiffies(20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) /* R1B or with data, should check SDCBUSY */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) while ((readl(host->base + SDC_STS) & SDC_STS_SDCBUSY) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) time_before(jiffies, tmo))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) if (readl(host->base + SDC_STS) & SDC_STS_SDCBUSY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) dev_err(host->dev, "Controller busy detected\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) host->error |= REQ_CMD_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) msdc_cmd_done(host, MSDC_INT_CMDTMO, mrq, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) static void msdc_start_command(struct msdc_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) struct mmc_request *mrq, struct mmc_command *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) u32 rawcmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) WARN_ON(host->cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) host->cmd = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) mod_delayed_work(system_wq, &host->req_timeout, DAT_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) if (!msdc_cmd_is_ready(host, mrq, cmd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) if ((readl(host->base + MSDC_FIFOCS) & MSDC_FIFOCS_TXCNT) >> 16 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) readl(host->base + MSDC_FIFOCS) & MSDC_FIFOCS_RXCNT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) dev_err(host->dev, "TX/RX FIFO non-empty before start of IO. Reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) msdc_reset_hw(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) cmd->error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) rawcmd = msdc_cmd_prepare_raw_cmd(host, mrq, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) spin_lock_irqsave(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) sdr_set_bits(host->base + MSDC_INTEN, cmd_ints_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) spin_unlock_irqrestore(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) writel(cmd->arg, host->base + SDC_ARG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) writel(rawcmd, host->base + SDC_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) static void msdc_cmd_next(struct msdc_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) struct mmc_request *mrq, struct mmc_command *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) if ((cmd->error &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) !(cmd->error == -EILSEQ &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) (cmd->opcode == MMC_SEND_TUNING_BLOCK ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200))) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) (mrq->sbc && mrq->sbc->error))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) msdc_request_done(host, mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) else if (cmd == mrq->sbc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) msdc_start_command(host, mrq, mrq->cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) else if (!cmd->data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) msdc_request_done(host, mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) msdc_start_data(host, mrq, cmd, cmd->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) static void msdc_ops_request(struct mmc_host *mmc, struct mmc_request *mrq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) struct msdc_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) host->error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) WARN_ON(host->mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) host->mrq = mrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) if (mrq->data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) msdc_prepare_data(host, mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) /* if SBC is required, we have HW option and SW option.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) * if HW option is enabled, and SBC does not have "special" flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) * use HW option, otherwise use SW option
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) if (mrq->sbc && (!mmc_card_mmc(mmc->card) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) (mrq->sbc->arg & 0xFFFF0000)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) msdc_start_command(host, mrq, mrq->sbc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) msdc_start_command(host, mrq, mrq->cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) static void msdc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) struct msdc_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) struct mmc_data *data = mrq->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) if (!data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) msdc_prepare_data(host, mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) data->host_cookie |= MSDC_ASYNC_FLAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) static void msdc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) int err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) struct msdc_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) struct mmc_data *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) data = mrq->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) if (!data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) if (data->host_cookie) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) data->host_cookie &= ~MSDC_ASYNC_FLAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) msdc_unprepare_data(host, mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) static void msdc_data_xfer_next(struct msdc_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) struct mmc_request *mrq, struct mmc_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) if (mmc_op_multi(mrq->cmd->opcode) && mrq->stop && !mrq->stop->error &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) !mrq->sbc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) msdc_start_command(host, mrq, mrq->stop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) msdc_request_done(host, mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) static bool msdc_data_xfer_done(struct msdc_host *host, u32 events,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) struct mmc_request *mrq, struct mmc_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) struct mmc_command *stop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) bool done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) unsigned int check_data = events &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) (MSDC_INT_XFER_COMPL | MSDC_INT_DATCRCERR | MSDC_INT_DATTMO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) | MSDC_INT_DMA_BDCSERR | MSDC_INT_DMA_GPDCSERR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) | MSDC_INT_DMA_PROTECT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) spin_lock_irqsave(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) done = !host->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) if (check_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) host->data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) spin_unlock_irqrestore(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) if (done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) stop = data->stop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) if (check_data || (stop && stop->error)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) dev_dbg(host->dev, "DMA status: 0x%8X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) readl(host->base + MSDC_DMA_CFG));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) sdr_set_field(host->base + MSDC_DMA_CTRL, MSDC_DMA_CTRL_STOP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) while (readl(host->base + MSDC_DMA_CFG) & MSDC_DMA_CFG_STS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) sdr_clr_bits(host->base + MSDC_INTEN, data_ints_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) dev_dbg(host->dev, "DMA stop\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) if ((events & MSDC_INT_XFER_COMPL) && (!stop || !stop->error)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) data->bytes_xfered = data->blocks * data->blksz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) dev_dbg(host->dev, "interrupt events: %x\n", events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) msdc_reset_hw(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) host->error |= REQ_DAT_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) data->bytes_xfered = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) if (events & MSDC_INT_DATTMO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) data->error = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) else if (events & MSDC_INT_DATCRCERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) data->error = -EILSEQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) dev_dbg(host->dev, "%s: cmd=%d; blocks=%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) __func__, mrq->cmd->opcode, data->blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) dev_dbg(host->dev, "data_error=%d xfer_size=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) (int)data->error, data->bytes_xfered);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) msdc_data_xfer_next(host, mrq, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) done = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) return done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) static void msdc_set_buswidth(struct msdc_host *host, u32 width)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) u32 val = readl(host->base + SDC_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) val &= ~SDC_CFG_BUSWIDTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) switch (width) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) case MMC_BUS_WIDTH_1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) val |= (MSDC_BUS_1BITS << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) case MMC_BUS_WIDTH_4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) val |= (MSDC_BUS_4BITS << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) case MMC_BUS_WIDTH_8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) val |= (MSDC_BUS_8BITS << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) writel(val, host->base + SDC_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) dev_dbg(host->dev, "Bus Width = %d", width);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) static int msdc_ops_switch_volt(struct mmc_host *mmc, struct mmc_ios *ios)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) struct msdc_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) if (!IS_ERR(mmc->supply.vqmmc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) if (ios->signal_voltage != MMC_SIGNAL_VOLTAGE_330 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) ios->signal_voltage != MMC_SIGNAL_VOLTAGE_180) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) dev_err(host->dev, "Unsupported signal voltage!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) ret = mmc_regulator_set_vqmmc(mmc, ios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) dev_dbg(host->dev, "Regulator set error %d (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) ret, ios->signal_voltage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) /* Apply different pinctrl settings for different signal voltage */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) pinctrl_select_state(host->pinctrl, host->pins_uhs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) pinctrl_select_state(host->pinctrl, host->pins_default);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) static int msdc_card_busy(struct mmc_host *mmc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) struct msdc_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) u32 status = readl(host->base + MSDC_PS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) /* only check if data0 is low */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) return !(status & BIT(16));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) static void msdc_request_timeout(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) struct msdc_host *host = container_of(work, struct msdc_host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) req_timeout.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) /* simulate HW timeout status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) dev_err(host->dev, "%s: aborting cmd/data/mrq\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) if (host->mrq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) dev_err(host->dev, "%s: aborting mrq=%p cmd=%d\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) host->mrq, host->mrq->cmd->opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) if (host->cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) dev_err(host->dev, "%s: aborting cmd=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) __func__, host->cmd->opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) msdc_cmd_done(host, MSDC_INT_CMDTMO, host->mrq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) host->cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) } else if (host->data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) dev_err(host->dev, "%s: abort data: cmd%d; %d blocks\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) __func__, host->mrq->cmd->opcode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) host->data->blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) msdc_data_xfer_done(host, MSDC_INT_DATTMO, host->mrq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) host->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) static void __msdc_enable_sdio_irq(struct msdc_host *host, int enb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) if (enb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) sdr_set_bits(host->base + MSDC_INTEN, MSDC_INTEN_SDIOIRQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) sdr_set_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) if (host->dev_comp->recheck_sdio_irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) msdc_recheck_sdio_irq(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INTEN_SDIOIRQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) static void msdc_enable_sdio_irq(struct mmc_host *mmc, int enb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) struct msdc_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) spin_lock_irqsave(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) __msdc_enable_sdio_irq(host, enb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) spin_unlock_irqrestore(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) if (enb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) pm_runtime_get_noresume(host->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) pm_runtime_put_noidle(host->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) static irqreturn_t msdc_cmdq_irq(struct msdc_host *host, u32 intsts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) struct mmc_host *mmc = mmc_from_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) int cmd_err = 0, dat_err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) if (intsts & MSDC_INT_RSPCRCERR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) cmd_err = -EILSEQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) dev_err(host->dev, "%s: CMD CRC ERR", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) } else if (intsts & MSDC_INT_CMDTMO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) cmd_err = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) dev_err(host->dev, "%s: CMD TIMEOUT ERR", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) if (intsts & MSDC_INT_DATCRCERR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) dat_err = -EILSEQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) dev_err(host->dev, "%s: DATA CRC ERR", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) } else if (intsts & MSDC_INT_DATTMO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) dat_err = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) dev_err(host->dev, "%s: DATA TIMEOUT ERR", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) if (cmd_err || dat_err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) dev_err(host->dev, "cmd_err = %d, dat_err =%d, intsts = 0x%x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) cmd_err, dat_err, intsts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) return cqhci_irq(mmc, 0, cmd_err, dat_err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) static irqreturn_t msdc_irq(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) struct msdc_host *host = (struct msdc_host *) dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) struct mmc_host *mmc = mmc_from_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) while (true) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) struct mmc_request *mrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) struct mmc_command *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) struct mmc_data *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) u32 events, event_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) spin_lock_irqsave(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) events = readl(host->base + MSDC_INT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) event_mask = readl(host->base + MSDC_INTEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) if ((events & event_mask) & MSDC_INT_SDIOIRQ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) __msdc_enable_sdio_irq(host, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) /* clear interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) writel(events & event_mask, host->base + MSDC_INT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) mrq = host->mrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) cmd = host->cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) data = host->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) spin_unlock_irqrestore(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) if ((events & event_mask) & MSDC_INT_SDIOIRQ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) sdio_signal_irq(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) if ((events & event_mask) & MSDC_INT_CDSC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) if (host->internal_cd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) mmc_detect_change(mmc, msecs_to_jiffies(20));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) events &= ~MSDC_INT_CDSC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) if (!(events & (event_mask & ~MSDC_INT_SDIOIRQ)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) if ((mmc->caps2 & MMC_CAP2_CQE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) (events & MSDC_INT_CMDQ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) msdc_cmdq_irq(host, events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) /* clear interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) writel(events, host->base + MSDC_INT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) if (!mrq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) dev_err(host->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) "%s: MRQ=NULL; events=%08X; event_mask=%08X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) __func__, events, event_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) dev_dbg(host->dev, "%s: events=%08X\n", __func__, events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) if (cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) msdc_cmd_done(host, events, mrq, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) else if (data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) msdc_data_xfer_done(host, events, mrq, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) static void msdc_init_hw(struct msdc_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) u32 tune_reg = host->dev_comp->pad_tune_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) if (host->reset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) reset_control_assert(host->reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) usleep_range(10, 50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) reset_control_deassert(host->reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) /* Configure to MMC/SD mode, clock free running */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_MODE | MSDC_CFG_CKPDN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) /* Reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) msdc_reset_hw(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) /* Disable and clear all interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) writel(0, host->base + MSDC_INTEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) val = readl(host->base + MSDC_INT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) writel(val, host->base + MSDC_INT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) /* Configure card detection */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) if (host->internal_cd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) sdr_set_field(host->base + MSDC_PS, MSDC_PS_CDDEBOUNCE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) DEFAULT_DEBOUNCE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) sdr_set_bits(host->base + MSDC_PS, MSDC_PS_CDEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) sdr_set_bits(host->base + MSDC_INTEN, MSDC_INTEN_CDSC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) sdr_set_bits(host->base + SDC_CFG, SDC_CFG_INSWKUP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_INSWKUP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) sdr_clr_bits(host->base + MSDC_PS, MSDC_PS_CDEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INTEN_CDSC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) if (host->top_base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) writel(0, host->top_base + EMMC_TOP_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) writel(0, host->top_base + EMMC_TOP_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) writel(0, host->base + tune_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) writel(0, host->base + MSDC_IOCON);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_DDLSEL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) writel(0x403c0046, host->base + MSDC_PATCH_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_CKGEN_MSDC_DLY_SEL, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) writel(0xffff4089, host->base + MSDC_PATCH_BIT1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) sdr_set_bits(host->base + EMMC50_CFG0, EMMC50_CFG_CFCSTS_SEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) if (host->dev_comp->stop_clk_fix) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) sdr_set_field(host->base + MSDC_PATCH_BIT1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) MSDC_PATCH_BIT1_STOP_DLY, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) sdr_clr_bits(host->base + SDC_FIFO_CFG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) SDC_FIFO_CFG_WRVALIDSEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) sdr_clr_bits(host->base + SDC_FIFO_CFG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) SDC_FIFO_CFG_RDVALIDSEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) if (host->dev_comp->busy_check)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) sdr_clr_bits(host->base + MSDC_PATCH_BIT1, (1 << 7));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) if (host->dev_comp->async_fifo) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) sdr_set_field(host->base + MSDC_PATCH_BIT2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) MSDC_PB2_RESPWAIT, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) if (host->dev_comp->enhance_rx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) if (host->top_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) sdr_set_bits(host->top_base + EMMC_TOP_CONTROL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) SDC_RX_ENH_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) sdr_set_bits(host->base + SDC_ADV_CFG0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) SDC_RX_ENHANCE_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) sdr_set_field(host->base + MSDC_PATCH_BIT2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) MSDC_PB2_RESPSTSENSEL, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) sdr_set_field(host->base + MSDC_PATCH_BIT2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) MSDC_PB2_CRCSTSENSEL, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) /* use async fifo, then no need tune internal delay */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) sdr_clr_bits(host->base + MSDC_PATCH_BIT2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) MSDC_PATCH_BIT2_CFGRESP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) sdr_set_bits(host->base + MSDC_PATCH_BIT2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) MSDC_PATCH_BIT2_CFGCRCSTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) if (host->dev_comp->support_64g)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) sdr_set_bits(host->base + MSDC_PATCH_BIT2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) MSDC_PB2_SUPPORT_64G);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) if (host->dev_comp->data_tune) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) if (host->top_base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) sdr_set_bits(host->top_base + EMMC_TOP_CONTROL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) PAD_DAT_RD_RXDLY_SEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) sdr_clr_bits(host->top_base + EMMC_TOP_CONTROL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) DATA_K_VALUE_SEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) sdr_set_bits(host->top_base + EMMC_TOP_CMD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) PAD_CMD_RD_RXDLY_SEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) sdr_set_bits(host->base + tune_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) MSDC_PAD_TUNE_RD_SEL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) MSDC_PAD_TUNE_CMD_SEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) /* choose clock tune */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) if (host->top_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) sdr_set_bits(host->top_base + EMMC_TOP_CONTROL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) PAD_RXDLY_SEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) sdr_set_bits(host->base + tune_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) MSDC_PAD_TUNE_RXDLYSEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) /* Configure to enable SDIO mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) * it's must otherwise sdio cmd5 failed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) sdr_set_bits(host->base + SDC_CFG, SDC_CFG_SDIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) /* Config SDIO device detect interrupt function */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) sdr_set_bits(host->base + SDC_ADV_CFG0, SDC_DAT1_IRQ_TRIGGER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) /* Configure to default data timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) sdr_set_field(host->base + SDC_CFG, SDC_CFG_DTOC, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) host->def_tune_para.iocon = readl(host->base + MSDC_IOCON);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) host->saved_tune_para.iocon = readl(host->base + MSDC_IOCON);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) if (host->top_base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) host->def_tune_para.emmc_top_control =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) readl(host->top_base + EMMC_TOP_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) host->def_tune_para.emmc_top_cmd =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) readl(host->top_base + EMMC_TOP_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) host->saved_tune_para.emmc_top_control =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) readl(host->top_base + EMMC_TOP_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) host->saved_tune_para.emmc_top_cmd =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) readl(host->top_base + EMMC_TOP_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) host->def_tune_para.pad_tune = readl(host->base + tune_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) host->saved_tune_para.pad_tune = readl(host->base + tune_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) dev_dbg(host->dev, "init hardware done!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) static void msdc_deinit_hw(struct msdc_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) if (host->internal_cd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) /* Disabled card-detect */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) sdr_clr_bits(host->base + MSDC_PS, MSDC_PS_CDEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_INSWKUP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) /* Disable and clear all interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) writel(0, host->base + MSDC_INTEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) val = readl(host->base + MSDC_INT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) writel(val, host->base + MSDC_INT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) /* init gpd and bd list in msdc_drv_probe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) static void msdc_init_gpd_bd(struct msdc_host *host, struct msdc_dma *dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) struct mt_gpdma_desc *gpd = dma->gpd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) struct mt_bdma_desc *bd = dma->bd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) dma_addr_t dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) memset(gpd, 0, sizeof(struct mt_gpdma_desc) * 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) dma_addr = dma->gpd_addr + sizeof(struct mt_gpdma_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) gpd->gpd_info = GPDMA_DESC_BDP; /* hwo, cs, bd pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) /* gpd->next is must set for desc DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) * That's why must alloc 2 gpd structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) gpd->next = lower_32_bits(dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) if (host->dev_comp->support_64g)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) gpd->gpd_info |= (upper_32_bits(dma_addr) & 0xf) << 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) dma_addr = dma->bd_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) gpd->ptr = lower_32_bits(dma->bd_addr); /* physical address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) if (host->dev_comp->support_64g)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) gpd->gpd_info |= (upper_32_bits(dma_addr) & 0xf) << 28;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) memset(bd, 0, sizeof(struct mt_bdma_desc) * MAX_BD_NUM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) for (i = 0; i < (MAX_BD_NUM - 1); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) dma_addr = dma->bd_addr + sizeof(*bd) * (i + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) bd[i].next = lower_32_bits(dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) if (host->dev_comp->support_64g)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) bd[i].bd_info |= (upper_32_bits(dma_addr) & 0xf) << 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) static void msdc_ops_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) struct msdc_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) msdc_set_buswidth(host, ios->bus_width);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) /* Suspend/Resume will do power off/on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) switch (ios->power_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) case MMC_POWER_UP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) if (!IS_ERR(mmc->supply.vmmc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) msdc_init_hw(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) ios->vdd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) dev_err(host->dev, "Failed to set vmmc power!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) case MMC_POWER_ON:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) ret = regulator_enable(mmc->supply.vqmmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) dev_err(host->dev, "Failed to set vqmmc power!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) host->vqmmc_enabled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) case MMC_POWER_OFF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) if (!IS_ERR(mmc->supply.vmmc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) regulator_disable(mmc->supply.vqmmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) host->vqmmc_enabled = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) if (host->mclk != ios->clock || host->timing != ios->timing)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) msdc_set_mclk(host, ios->timing, ios->clock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) static u32 test_delay_bit(u32 delay, u32 bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) bit %= PAD_DELAY_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) return delay & (1 << bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) static int get_delay_len(u32 delay, u32 start_bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) for (i = 0; i < (PAD_DELAY_MAX - start_bit); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) if (test_delay_bit(delay, start_bit + i) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) return PAD_DELAY_MAX - start_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) static struct msdc_delay_phase get_best_delay(struct msdc_host *host, u32 delay)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) int start = 0, len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) int start_final = 0, len_final = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) u8 final_phase = 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) struct msdc_delay_phase delay_phase = { 0, };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) if (delay == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) dev_err(host->dev, "phase error: [map:%x]\n", delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) delay_phase.final_phase = final_phase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) return delay_phase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) while (start < PAD_DELAY_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) len = get_delay_len(delay, start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) if (len_final < len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) start_final = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) len_final = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) start += len ? len : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) if (len >= 12 && start_final < 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) /* The rule is that to find the smallest delay cell */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) if (start_final == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) final_phase = (start_final + len_final / 3) % PAD_DELAY_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) final_phase = (start_final + len_final / 2) % PAD_DELAY_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) dev_info(host->dev, "phase: [map:%x] [maxlen:%d] [final:%d]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) delay, len_final, final_phase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) delay_phase.maxlen = len_final;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) delay_phase.start = start_final;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) delay_phase.final_phase = final_phase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) return delay_phase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) static inline void msdc_set_cmd_delay(struct msdc_host *host, u32 value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) u32 tune_reg = host->dev_comp->pad_tune_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) if (host->top_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) sdr_set_field(host->top_base + EMMC_TOP_CMD, PAD_CMD_RXDLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) static inline void msdc_set_data_delay(struct msdc_host *host, u32 value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) u32 tune_reg = host->dev_comp->pad_tune_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) if (host->top_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) sdr_set_field(host->top_base + EMMC_TOP_CONTROL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) PAD_DAT_RD_RXDLY, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_DATRRDLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) static int msdc_tune_response(struct mmc_host *mmc, u32 opcode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) struct msdc_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) u32 rise_delay = 0, fall_delay = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) struct msdc_delay_phase internal_delay_phase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) u8 final_delay, final_maxlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) u32 internal_delay = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) u32 tune_reg = host->dev_comp->pad_tune_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) int cmd_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) if (mmc->ios.timing == MMC_TIMING_MMC_HS200 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) mmc->ios.timing == MMC_TIMING_UHS_SDR104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) sdr_set_field(host->base + tune_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) MSDC_PAD_TUNE_CMDRRDLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) host->hs200_cmd_int_delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) for (i = 0 ; i < PAD_DELAY_MAX; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) msdc_set_cmd_delay(host, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) * Using the same parameters, it may sometimes pass the test,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) * but sometimes it may fail. To make sure the parameters are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) * more stable, we test each set of parameters 3 times.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) for (j = 0; j < 3; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) mmc_send_tuning(mmc, opcode, &cmd_err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) if (!cmd_err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) rise_delay |= (1 << i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) rise_delay &= ~(1 << i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) final_rise_delay = get_best_delay(host, rise_delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) /* if rising edge has enough margin, then do not scan falling edge */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) if (final_rise_delay.maxlen >= 12 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) (final_rise_delay.start == 0 && final_rise_delay.maxlen >= 4))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) goto skip_fall;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) for (i = 0; i < PAD_DELAY_MAX; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) msdc_set_cmd_delay(host, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) * Using the same parameters, it may sometimes pass the test,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) * but sometimes it may fail. To make sure the parameters are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) * more stable, we test each set of parameters 3 times.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) for (j = 0; j < 3; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) mmc_send_tuning(mmc, opcode, &cmd_err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) if (!cmd_err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) fall_delay |= (1 << i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) fall_delay &= ~(1 << i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) final_fall_delay = get_best_delay(host, fall_delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) skip_fall:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) final_maxlen = max(final_rise_delay.maxlen, final_fall_delay.maxlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) if (final_fall_delay.maxlen >= 12 && final_fall_delay.start < 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) final_maxlen = final_fall_delay.maxlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) if (final_maxlen == final_rise_delay.maxlen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) final_delay = final_rise_delay.final_phase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) final_delay = final_fall_delay.final_phase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) msdc_set_cmd_delay(host, final_delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) if (host->dev_comp->async_fifo || host->hs200_cmd_int_delay)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) goto skip_internal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) for (i = 0; i < PAD_DELAY_MAX; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) sdr_set_field(host->base + tune_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) MSDC_PAD_TUNE_CMDRRDLY, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) mmc_send_tuning(mmc, opcode, &cmd_err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) if (!cmd_err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) internal_delay |= (1 << i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) dev_dbg(host->dev, "Final internal delay: 0x%x\n", internal_delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) internal_delay_phase = get_best_delay(host, internal_delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRRDLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) internal_delay_phase.final_phase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) skip_internal:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) dev_dbg(host->dev, "Final cmd pad delay: %x\n", final_delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) return final_delay == 0xff ? -EIO : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) static int hs400_tune_response(struct mmc_host *mmc, u32 opcode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) struct msdc_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) u32 cmd_delay = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) struct msdc_delay_phase final_cmd_delay = { 0,};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) u8 final_delay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) int cmd_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) /* select EMMC50 PAD CMD tune */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) sdr_set_bits(host->base + PAD_CMD_TUNE, BIT(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) sdr_set_field(host->base + MSDC_PATCH_BIT1, MSDC_PATCH_BIT1_CMDTA, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) if (mmc->ios.timing == MMC_TIMING_MMC_HS200 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) mmc->ios.timing == MMC_TIMING_UHS_SDR104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) sdr_set_field(host->base + MSDC_PAD_TUNE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) MSDC_PAD_TUNE_CMDRRDLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) host->hs200_cmd_int_delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) if (host->hs400_cmd_resp_sel_rising)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) for (i = 0 ; i < PAD_DELAY_MAX; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) sdr_set_field(host->base + PAD_CMD_TUNE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) PAD_CMD_TUNE_RX_DLY3, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) * Using the same parameters, it may sometimes pass the test,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) * but sometimes it may fail. To make sure the parameters are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) * more stable, we test each set of parameters 3 times.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) for (j = 0; j < 3; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) mmc_send_tuning(mmc, opcode, &cmd_err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) if (!cmd_err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) cmd_delay |= (1 << i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) cmd_delay &= ~(1 << i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) final_cmd_delay = get_best_delay(host, cmd_delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) sdr_set_field(host->base + PAD_CMD_TUNE, PAD_CMD_TUNE_RX_DLY3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) final_cmd_delay.final_phase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) final_delay = final_cmd_delay.final_phase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) dev_dbg(host->dev, "Final cmd pad delay: %x\n", final_delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) return final_delay == 0xff ? -EIO : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) static int msdc_tune_data(struct mmc_host *mmc, u32 opcode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) struct msdc_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) u32 rise_delay = 0, fall_delay = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) u8 final_delay, final_maxlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_INT_DAT_LATCH_CK_SEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) host->latch_ck);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) for (i = 0 ; i < PAD_DELAY_MAX; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) msdc_set_data_delay(host, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) ret = mmc_send_tuning(mmc, opcode, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) rise_delay |= (1 << i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) final_rise_delay = get_best_delay(host, rise_delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) /* if rising edge has enough margin, then do not scan falling edge */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) if (final_rise_delay.maxlen >= 12 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) (final_rise_delay.start == 0 && final_rise_delay.maxlen >= 4))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) goto skip_fall;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) for (i = 0; i < PAD_DELAY_MAX; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) msdc_set_data_delay(host, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) ret = mmc_send_tuning(mmc, opcode, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) fall_delay |= (1 << i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) final_fall_delay = get_best_delay(host, fall_delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) skip_fall:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) final_maxlen = max(final_rise_delay.maxlen, final_fall_delay.maxlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) if (final_maxlen == final_rise_delay.maxlen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) final_delay = final_rise_delay.final_phase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) final_delay = final_fall_delay.final_phase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) msdc_set_data_delay(host, final_delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) dev_dbg(host->dev, "Final data pad delay: %x\n", final_delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) return final_delay == 0xff ? -EIO : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) * MSDC IP which supports data tune + async fifo can do CMD/DAT tune
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) * together, which can save the tuning time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) static int msdc_tune_together(struct mmc_host *mmc, u32 opcode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) struct msdc_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) u32 rise_delay = 0, fall_delay = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) u8 final_delay, final_maxlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_INT_DAT_LATCH_CK_SEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) host->latch_ck);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) sdr_clr_bits(host->base + MSDC_IOCON,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) for (i = 0 ; i < PAD_DELAY_MAX; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) msdc_set_cmd_delay(host, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) msdc_set_data_delay(host, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) ret = mmc_send_tuning(mmc, opcode, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) rise_delay |= (1 << i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) final_rise_delay = get_best_delay(host, rise_delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) /* if rising edge has enough margin, then do not scan falling edge */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) if (final_rise_delay.maxlen >= 12 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) (final_rise_delay.start == 0 && final_rise_delay.maxlen >= 4))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) goto skip_fall;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) sdr_set_bits(host->base + MSDC_IOCON,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) for (i = 0; i < PAD_DELAY_MAX; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) msdc_set_cmd_delay(host, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) msdc_set_data_delay(host, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) ret = mmc_send_tuning(mmc, opcode, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) fall_delay |= (1 << i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) final_fall_delay = get_best_delay(host, fall_delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) skip_fall:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) final_maxlen = max(final_rise_delay.maxlen, final_fall_delay.maxlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) if (final_maxlen == final_rise_delay.maxlen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) sdr_clr_bits(host->base + MSDC_IOCON,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) final_delay = final_rise_delay.final_phase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) sdr_set_bits(host->base + MSDC_IOCON,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) final_delay = final_fall_delay.final_phase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) msdc_set_cmd_delay(host, final_delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) msdc_set_data_delay(host, final_delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) dev_dbg(host->dev, "Final pad delay: %x\n", final_delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) return final_delay == 0xff ? -EIO : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) static int msdc_execute_tuning(struct mmc_host *mmc, u32 opcode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) struct msdc_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) u32 tune_reg = host->dev_comp->pad_tune_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) if (host->dev_comp->data_tune && host->dev_comp->async_fifo) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) ret = msdc_tune_together(mmc, opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) if (host->hs400_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) sdr_clr_bits(host->base + MSDC_IOCON,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) msdc_set_data_delay(host, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) goto tune_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) if (host->hs400_mode &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) host->dev_comp->hs400_tune)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) ret = hs400_tune_response(mmc, opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) ret = msdc_tune_response(mmc, opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) if (ret == -EIO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) dev_err(host->dev, "Tune response fail!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) if (host->hs400_mode == false) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) ret = msdc_tune_data(mmc, opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) if (ret == -EIO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) dev_err(host->dev, "Tune data fail!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) tune_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) host->saved_tune_para.iocon = readl(host->base + MSDC_IOCON);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) host->saved_tune_para.pad_tune = readl(host->base + tune_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) host->saved_tune_para.pad_cmd_tune = readl(host->base + PAD_CMD_TUNE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) if (host->top_base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) host->saved_tune_para.emmc_top_control = readl(host->top_base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) EMMC_TOP_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) host->saved_tune_para.emmc_top_cmd = readl(host->top_base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) EMMC_TOP_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) static int msdc_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) struct msdc_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) host->hs400_mode = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) if (host->top_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) writel(host->hs400_ds_delay,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) host->top_base + EMMC50_PAD_DS_TUNE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) writel(host->hs400_ds_delay, host->base + PAD_DS_TUNE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) /* hs400 mode must set it to 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) sdr_clr_bits(host->base + MSDC_PATCH_BIT2, MSDC_PATCH_BIT2_CFGCRCSTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) /* to improve read performance, set outstanding to 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) sdr_set_field(host->base + EMMC50_CFG3, EMMC50_CFG3_OUTS_WR, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) static void msdc_hw_reset(struct mmc_host *mmc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) struct msdc_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) sdr_set_bits(host->base + EMMC_IOCON, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) udelay(10); /* 10us is enough */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) sdr_clr_bits(host->base + EMMC_IOCON, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) static void msdc_ack_sdio_irq(struct mmc_host *mmc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) struct msdc_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) spin_lock_irqsave(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) __msdc_enable_sdio_irq(host, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) spin_unlock_irqrestore(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) static int msdc_get_cd(struct mmc_host *mmc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) struct msdc_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) int val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) if (mmc->caps & MMC_CAP_NONREMOVABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) if (!host->internal_cd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) return mmc_gpio_get_cd(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) val = readl(host->base + MSDC_PS) & MSDC_PS_CDSTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) if (mmc->caps2 & MMC_CAP2_CD_ACTIVE_HIGH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) return !!val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) return !val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) static void msdc_cqe_enable(struct mmc_host *mmc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) struct msdc_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) /* enable cmdq irq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) writel(MSDC_INT_CMDQ, host->base + MSDC_INTEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) /* enable busy check */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) sdr_set_bits(host->base + MSDC_PATCH_BIT1, MSDC_PB1_BUSY_CHECK_SEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) /* default write data / busy timeout 20s */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) msdc_set_busy_timeout(host, 20 * 1000000000ULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) /* default read data timeout 1s */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) msdc_set_timeout(host, 1000000000ULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) static void msdc_cqe_disable(struct mmc_host *mmc, bool recovery)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) struct msdc_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) unsigned int val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) /* disable cmdq irq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INT_CMDQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) /* disable busy check */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) sdr_clr_bits(host->base + MSDC_PATCH_BIT1, MSDC_PB1_BUSY_CHECK_SEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) if (recovery) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) sdr_set_field(host->base + MSDC_DMA_CTRL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) MSDC_DMA_CTRL_STOP, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) if (WARN_ON(readl_poll_timeout(host->base + MSDC_DMA_CFG, val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) !(val & MSDC_DMA_CFG_STS), 1, 3000)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) msdc_reset_hw(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) static void msdc_cqe_pre_enable(struct mmc_host *mmc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) struct cqhci_host *cq_host = mmc->cqe_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) reg = cqhci_readl(cq_host, CQHCI_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) reg |= CQHCI_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) cqhci_writel(cq_host, reg, CQHCI_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) static void msdc_cqe_post_disable(struct mmc_host *mmc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) struct cqhci_host *cq_host = mmc->cqe_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) reg = cqhci_readl(cq_host, CQHCI_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) reg &= ~CQHCI_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) cqhci_writel(cq_host, reg, CQHCI_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) static const struct mmc_host_ops mt_msdc_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) .post_req = msdc_post_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) .pre_req = msdc_pre_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) .request = msdc_ops_request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) .set_ios = msdc_ops_set_ios,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) .get_ro = mmc_gpio_get_ro,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) .get_cd = msdc_get_cd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) .enable_sdio_irq = msdc_enable_sdio_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) .ack_sdio_irq = msdc_ack_sdio_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) .start_signal_voltage_switch = msdc_ops_switch_volt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) .card_busy = msdc_card_busy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) .execute_tuning = msdc_execute_tuning,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) .prepare_hs400_tuning = msdc_prepare_hs400_tuning,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) .hw_reset = msdc_hw_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) static const struct cqhci_host_ops msdc_cmdq_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) .enable = msdc_cqe_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) .disable = msdc_cqe_disable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) .pre_enable = msdc_cqe_pre_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) .post_disable = msdc_cqe_post_disable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) static void msdc_of_property_parse(struct platform_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) struct msdc_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) of_property_read_u32(pdev->dev.of_node, "mediatek,latch-ck",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) &host->latch_ck);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) of_property_read_u32(pdev->dev.of_node, "hs400-ds-delay",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) &host->hs400_ds_delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) of_property_read_u32(pdev->dev.of_node, "mediatek,hs200-cmd-int-delay",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) &host->hs200_cmd_int_delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) of_property_read_u32(pdev->dev.of_node, "mediatek,hs400-cmd-int-delay",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) &host->hs400_cmd_int_delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) if (of_property_read_bool(pdev->dev.of_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) "mediatek,hs400-cmd-resp-sel-rising"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) host->hs400_cmd_resp_sel_rising = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) host->hs400_cmd_resp_sel_rising = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) if (of_property_read_bool(pdev->dev.of_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) "supports-cqe"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) host->cqhci = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) host->cqhci = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) static int msdc_drv_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) struct mmc_host *mmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) struct msdc_host *host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) if (!pdev->dev.of_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) dev_err(&pdev->dev, "No DT found\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) /* Allocate MMC host for this device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) mmc = mmc_alloc_host(sizeof(struct msdc_host), &pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) if (!mmc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) ret = mmc_of_parse(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) goto host_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) host->base = devm_platform_ioremap_resource(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) if (IS_ERR(host->base)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) ret = PTR_ERR(host->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) goto host_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) if (res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) host->top_base = devm_ioremap_resource(&pdev->dev, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) if (IS_ERR(host->top_base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) host->top_base = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) ret = mmc_regulator_get_supply(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) goto host_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) host->src_clk = devm_clk_get(&pdev->dev, "source");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) if (IS_ERR(host->src_clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) ret = PTR_ERR(host->src_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) goto host_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) host->h_clk = devm_clk_get(&pdev->dev, "hclk");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) if (IS_ERR(host->h_clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) ret = PTR_ERR(host->h_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) goto host_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) host->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) if (IS_ERR(host->bus_clk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) host->bus_clk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) /*source clock control gate is optional clock*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) host->src_clk_cg = devm_clk_get(&pdev->dev, "source_cg");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) if (IS_ERR(host->src_clk_cg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) host->src_clk_cg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) host->reset = devm_reset_control_get_optional_exclusive(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) "hrst");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) if (IS_ERR(host->reset))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) return PTR_ERR(host->reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) host->irq = platform_get_irq(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) if (host->irq < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) goto host_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) host->pinctrl = devm_pinctrl_get(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) if (IS_ERR(host->pinctrl)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) ret = PTR_ERR(host->pinctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) dev_err(&pdev->dev, "Cannot find pinctrl!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) goto host_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) host->pins_default = pinctrl_lookup_state(host->pinctrl, "default");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) if (IS_ERR(host->pins_default)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) ret = PTR_ERR(host->pins_default);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) dev_err(&pdev->dev, "Cannot find pinctrl default!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) goto host_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) host->pins_uhs = pinctrl_lookup_state(host->pinctrl, "state_uhs");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) if (IS_ERR(host->pins_uhs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) ret = PTR_ERR(host->pins_uhs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) dev_err(&pdev->dev, "Cannot find pinctrl uhs!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) goto host_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) msdc_of_property_parse(pdev, host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) host->dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) host->dev_comp = of_device_get_match_data(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) host->src_clk_freq = clk_get_rate(host->src_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) /* Set host parameters to mmc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) mmc->ops = &mt_msdc_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) if (host->dev_comp->clk_div_bits == 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 255);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 4095);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) if (!(mmc->caps & MMC_CAP_NONREMOVABLE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) !mmc_can_gpio_cd(mmc) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) host->dev_comp->use_internal_cd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) * Is removable but no GPIO declared, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) * use internal functionality.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) host->internal_cd = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) if (mmc->caps & MMC_CAP_SDIO_IRQ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) mmc->caps |= MMC_CAP_CMD23;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) if (host->cqhci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) /* MMC core transfer sizes tunable parameters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) mmc->max_segs = MAX_BD_NUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) if (host->dev_comp->support_64g)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) mmc->max_seg_size = BDMA_DESC_BUFLEN_EXT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) mmc->max_seg_size = BDMA_DESC_BUFLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) mmc->max_blk_size = 2048;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) mmc->max_req_size = 512 * 1024;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) mmc->max_blk_count = mmc->max_req_size / 512;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) if (host->dev_comp->support_64g)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) host->dma_mask = DMA_BIT_MASK(36);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) host->dma_mask = DMA_BIT_MASK(32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) mmc_dev(mmc)->dma_mask = &host->dma_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) host->timeout_clks = 3 * 1048576;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) host->dma.gpd = dma_alloc_coherent(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 2 * sizeof(struct mt_gpdma_desc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) &host->dma.gpd_addr, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) host->dma.bd = dma_alloc_coherent(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) MAX_BD_NUM * sizeof(struct mt_bdma_desc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) &host->dma.bd_addr, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) if (!host->dma.gpd || !host->dma.bd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) goto release_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) msdc_init_gpd_bd(host, &host->dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) INIT_DELAYED_WORK(&host->req_timeout, msdc_request_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) spin_lock_init(&host->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) platform_set_drvdata(pdev, mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) msdc_ungate_clock(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) msdc_init_hw(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) if (mmc->caps2 & MMC_CAP2_CQE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) host->cq_host = devm_kzalloc(mmc->parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) sizeof(*host->cq_host),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) if (!host->cq_host) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) goto host_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) host->cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) host->cq_host->mmio = host->base + 0x800;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) host->cq_host->ops = &msdc_cmdq_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) ret = cqhci_init(host->cq_host, mmc, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) goto host_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) mmc->max_segs = 128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) /* cqhci 16bit length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) /* 0 size, means 65536 so we don't have to -1 here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) mmc->max_seg_size = 64 * 1024;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) ret = devm_request_irq(&pdev->dev, host->irq, msdc_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) IRQF_TRIGGER_NONE, pdev->name, host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) goto release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) pm_runtime_set_active(host->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) pm_runtime_set_autosuspend_delay(host->dev, MTK_MMC_AUTOSUSPEND_DELAY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) pm_runtime_use_autosuspend(host->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) pm_runtime_enable(host->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) ret = mmc_add_host(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) pm_runtime_disable(host->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) release:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) platform_set_drvdata(pdev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) msdc_deinit_hw(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) msdc_gate_clock(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) release_mem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) if (host->dma.gpd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) dma_free_coherent(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 2 * sizeof(struct mt_gpdma_desc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) host->dma.gpd, host->dma.gpd_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) if (host->dma.bd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) dma_free_coherent(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) MAX_BD_NUM * sizeof(struct mt_bdma_desc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) host->dma.bd, host->dma.bd_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) host_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) mmc_free_host(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) static int msdc_drv_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) struct mmc_host *mmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) struct msdc_host *host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) mmc = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) pm_runtime_get_sync(host->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) platform_set_drvdata(pdev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) mmc_remove_host(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) msdc_deinit_hw(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) msdc_gate_clock(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) pm_runtime_disable(host->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) pm_runtime_put_noidle(host->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) dma_free_coherent(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) 2 * sizeof(struct mt_gpdma_desc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) host->dma.gpd, host->dma.gpd_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) dma_free_coherent(&pdev->dev, MAX_BD_NUM * sizeof(struct mt_bdma_desc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) host->dma.bd, host->dma.bd_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) mmc_free_host(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) static void msdc_save_reg(struct msdc_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) u32 tune_reg = host->dev_comp->pad_tune_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) host->save_para.msdc_cfg = readl(host->base + MSDC_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) host->save_para.iocon = readl(host->base + MSDC_IOCON);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) host->save_para.sdc_cfg = readl(host->base + SDC_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) host->save_para.patch_bit0 = readl(host->base + MSDC_PATCH_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) host->save_para.patch_bit1 = readl(host->base + MSDC_PATCH_BIT1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) host->save_para.patch_bit2 = readl(host->base + MSDC_PATCH_BIT2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) host->save_para.pad_ds_tune = readl(host->base + PAD_DS_TUNE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) host->save_para.pad_cmd_tune = readl(host->base + PAD_CMD_TUNE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) host->save_para.emmc50_cfg0 = readl(host->base + EMMC50_CFG0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) host->save_para.emmc50_cfg3 = readl(host->base + EMMC50_CFG3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) host->save_para.sdc_fifo_cfg = readl(host->base + SDC_FIFO_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) if (host->top_base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) host->save_para.emmc_top_control =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) readl(host->top_base + EMMC_TOP_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) host->save_para.emmc_top_cmd =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) readl(host->top_base + EMMC_TOP_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) host->save_para.emmc50_pad_ds_tune =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) readl(host->top_base + EMMC50_PAD_DS_TUNE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) host->save_para.pad_tune = readl(host->base + tune_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) static void msdc_restore_reg(struct msdc_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) struct mmc_host *mmc = mmc_from_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) u32 tune_reg = host->dev_comp->pad_tune_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) writel(host->save_para.msdc_cfg, host->base + MSDC_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) writel(host->save_para.iocon, host->base + MSDC_IOCON);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) writel(host->save_para.sdc_cfg, host->base + SDC_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) writel(host->save_para.patch_bit0, host->base + MSDC_PATCH_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) writel(host->save_para.patch_bit1, host->base + MSDC_PATCH_BIT1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) writel(host->save_para.patch_bit2, host->base + MSDC_PATCH_BIT2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) writel(host->save_para.pad_ds_tune, host->base + PAD_DS_TUNE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) writel(host->save_para.pad_cmd_tune, host->base + PAD_CMD_TUNE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) writel(host->save_para.emmc50_cfg0, host->base + EMMC50_CFG0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) writel(host->save_para.emmc50_cfg3, host->base + EMMC50_CFG3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) writel(host->save_para.sdc_fifo_cfg, host->base + SDC_FIFO_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) if (host->top_base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) writel(host->save_para.emmc_top_control,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) host->top_base + EMMC_TOP_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) writel(host->save_para.emmc_top_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) host->top_base + EMMC_TOP_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) writel(host->save_para.emmc50_pad_ds_tune,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) host->top_base + EMMC50_PAD_DS_TUNE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) writel(host->save_para.pad_tune, host->base + tune_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) if (sdio_irq_claimed(mmc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) __msdc_enable_sdio_irq(host, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) static int __maybe_unused msdc_runtime_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) struct mmc_host *mmc = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) struct msdc_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) msdc_save_reg(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) msdc_gate_clock(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) static int __maybe_unused msdc_runtime_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) struct mmc_host *mmc = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) struct msdc_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) msdc_ungate_clock(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) msdc_restore_reg(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) static int __maybe_unused msdc_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) struct mmc_host *mmc = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) if (mmc->caps2 & MMC_CAP2_CQE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) ret = cqhci_suspend(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) return pm_runtime_force_suspend(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) static int __maybe_unused msdc_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) return pm_runtime_force_resume(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) static const struct dev_pm_ops msdc_dev_pm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) SET_SYSTEM_SLEEP_PM_OPS(msdc_suspend, msdc_resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) SET_RUNTIME_PM_OPS(msdc_runtime_suspend, msdc_runtime_resume, NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) static struct platform_driver mt_msdc_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) .probe = msdc_drv_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) .remove = msdc_drv_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) .name = "mtk-msdc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) .probe_type = PROBE_PREFER_ASYNCHRONOUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) .of_match_table = msdc_of_ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) .pm = &msdc_dev_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) module_platform_driver(mt_msdc_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) MODULE_LICENSE("GPL v2");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) MODULE_DESCRIPTION("MediaTek SD/MMC Card Driver");