// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Actions Semi Owl SoCs SD/MMC driver
 *
 * Copyright (c) 2014 Actions Semi Inc.
 * Copyright (c) 2019 Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
 *
 * TODO: SDIO support
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/reset.h>
#include <linux/spinlock.h>

/*
 * SDC registers
 */
#define OWL_REG_SD_EN			0x0000
#define OWL_REG_SD_CTL			0x0004
#define OWL_REG_SD_STATE		0x0008
#define OWL_REG_SD_CMD			0x000c
#define OWL_REG_SD_ARG			0x0010
#define OWL_REG_SD_RSPBUF0		0x0014
#define OWL_REG_SD_RSPBUF1		0x0018
#define OWL_REG_SD_RSPBUF2		0x001c
#define OWL_REG_SD_RSPBUF3		0x0020
#define OWL_REG_SD_RSPBUF4		0x0024
#define OWL_REG_SD_DAT			0x0028
#define OWL_REG_SD_BLK_SIZE		0x002c
#define OWL_REG_SD_BLK_NUM		0x0030
#define OWL_REG_SD_BUF_SIZE		0x0034

/* SD_EN Bits */
#define OWL_SD_EN_RANE			BIT(31)
#define OWL_SD_EN_RAN_SEED(x)		(((x) & 0x3f) << 24)
#define OWL_SD_EN_S18EN			BIT(12)
#define OWL_SD_EN_RESE			BIT(10)
#define OWL_SD_EN_DAT1_S		BIT(9)
#define OWL_SD_EN_CLK_S			BIT(8)
#define OWL_SD_ENABLE			BIT(7)
#define OWL_SD_EN_BSEL			BIT(6)
#define OWL_SD_EN_SDIOEN		BIT(3)
#define OWL_SD_EN_DDREN			BIT(2)
#define OWL_SD_EN_DATAWID(x)		(((x) & 0x3) << 0)

/* SD_CTL Bits */
#define OWL_SD_CTL_TOUTEN		BIT(31)
#define OWL_SD_CTL_TOUTCNT(x)		(((x) & 0x7f) << 24)
#define OWL_SD_CTL_DELAY_MSK		GENMASK(23, 16)
#define OWL_SD_CTL_RDELAY(x)		(((x) & 0xf) << 20)
#define OWL_SD_CTL_WDELAY(x)		(((x) & 0xf) << 16)
#define OWL_SD_CTL_CMDLEN		BIT(13)
#define OWL_SD_CTL_SCC			BIT(12)
#define OWL_SD_CTL_TCN(x)		(((x) & 0xf) << 8)
#define OWL_SD_CTL_TS			BIT(7)
#define OWL_SD_CTL_LBE			BIT(6)
#define OWL_SD_CTL_C7EN			BIT(5)
#define OWL_SD_CTL_TM(x)		(((x) & 0xf) << 0)

#define OWL_SD_DELAY_LOW_CLK		0x0f
#define OWL_SD_DELAY_MID_CLK		0x0a
#define OWL_SD_DELAY_HIGH_CLK		0x09
#define OWL_SD_RDELAY_DDR50		0x0a
#define OWL_SD_WDELAY_DDR50		0x08

/* SD_STATE Bits */
#define OWL_SD_STATE_DAT1BS		BIT(18)
#define OWL_SD_STATE_SDIOB_P		BIT(17)
#define OWL_SD_STATE_SDIOB_EN		BIT(16)
#define OWL_SD_STATE_TOUTE		BIT(15)
#define OWL_SD_STATE_BAEP		BIT(14)
#define OWL_SD_STATE_MEMRDY		BIT(12)
#define OWL_SD_STATE_CMDS		BIT(11)
#define OWL_SD_STATE_DAT1AS		BIT(10)
#define OWL_SD_STATE_SDIOA_P		BIT(9)
#define OWL_SD_STATE_SDIOA_EN		BIT(8)
#define OWL_SD_STATE_DAT0S		BIT(7)
#define OWL_SD_STATE_TEIE		BIT(6)
#define OWL_SD_STATE_TEI		BIT(5)
#define OWL_SD_STATE_CLNR		BIT(4)
#define OWL_SD_STATE_CLC		BIT(3)
#define OWL_SD_STATE_WC16ER		BIT(2)
#define OWL_SD_STATE_RC16ER		BIT(1)
#define OWL_SD_STATE_CRC7ER		BIT(0)

#define OWL_CMD_TIMEOUT_MS		30000

struct owl_mmc_host {
	struct device *dev;
	struct reset_control *reset;
	void __iomem *base;
	struct clk *clk;
	struct completion sdc_complete;
	spinlock_t lock;
	int irq;
	u32 clock;
	bool ddr_50;

	enum dma_data_direction dma_dir;
	struct dma_chan *dma;
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config dma_cfg;
	struct completion dma_complete;

	struct mmc_host *mmc;
	struct mmc_request *mrq;
	struct mmc_command *cmd;
	struct mmc_data *data;
};

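/* Set or clear the bits in @val at @reg, leaving all other bits untouched. */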
static void owl_mmc_update_reg(void __iomem *reg, unsigned int val, bool state)
{
	unsigned int regval;

	regval = readl(reg);

	if (state)
		regval |= val;
	else
		regval &= ~val;

	writel(regval, reg);
}

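/*
 * Interrupt handler: on a transfer-end (TEI) event, write the status back to
 * acknowledge it (the status bits appear to be write-one-to-clear) and signal
 * sdc_complete so the waiting request path can continue.
 */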
static irqreturn_t owl_irq_handler(int irq, void *devid)
{
	struct owl_mmc_host *owl_host = devid;
	unsigned long flags;
	u32 state;

	spin_lock_irqsave(&owl_host->lock, flags);

	state = readl(owl_host->base + OWL_REG_SD_STATE);
	if (state & OWL_SD_STATE_TEI) {
		state = readl(owl_host->base + OWL_REG_SD_STATE);
		state |= OWL_SD_STATE_TEI;
		writel(state, owl_host->base + OWL_REG_SD_STATE);
		complete(&owl_host->sdc_complete);
	}

	spin_unlock_irqrestore(&owl_host->lock, flags);

	return IRQ_HANDLED;
}

static void owl_mmc_finish_request(struct owl_mmc_host *owl_host)
{
	struct mmc_request *mrq = owl_host->mrq;
	struct mmc_data *data;

	/* Should never be NULL */
	WARN_ON(!mrq);

	data = mrq->data;
	owl_host->mrq = NULL;

	if (data)
		dma_unmap_sg(owl_host->dma->device->dev, data->sg, data->sg_len,
			     owl_host->dma_dir);

	/* Finally finish request */
	mmc_request_done(owl_host->mmc, mrq);
}

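/*
 * Program and start a command transfer.  The TM field selects the hardware
 * transfer mode for the given response/data combination.  Commands without
 * data block here until the transfer-end interrupt fires and then decode the
 * response; data commands return immediately and are completed from
 * owl_mmc_request().
 */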
static void owl_mmc_send_cmd(struct owl_mmc_host *owl_host,
			     struct mmc_command *cmd,
			     struct mmc_data *data)
{
	unsigned long timeout;
	u32 mode, state, resp[2];
	u32 cmd_rsp_mask = 0;

	init_completion(&owl_host->sdc_complete);

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		mode = OWL_SD_CTL_TM(0);
		break;

	case MMC_RSP_R1:
		if (data) {
			if (data->flags & MMC_DATA_READ)
				mode = OWL_SD_CTL_TM(4);
			else
				mode = OWL_SD_CTL_TM(5);
		} else {
			mode = OWL_SD_CTL_TM(1);
		}
		cmd_rsp_mask = OWL_SD_STATE_CLNR | OWL_SD_STATE_CRC7ER;
		break;

	case MMC_RSP_R1B:
		mode = OWL_SD_CTL_TM(3);
		cmd_rsp_mask = OWL_SD_STATE_CLNR | OWL_SD_STATE_CRC7ER;
		break;

	case MMC_RSP_R2:
		mode = OWL_SD_CTL_TM(2);
		cmd_rsp_mask = OWL_SD_STATE_CLNR | OWL_SD_STATE_CRC7ER;
		break;

	case MMC_RSP_R3:
		mode = OWL_SD_CTL_TM(1);
		cmd_rsp_mask = OWL_SD_STATE_CLNR;
		break;

	default:
		dev_warn(owl_host->dev, "Unknown MMC command\n");
		cmd->error = -EINVAL;
		return;
	}

	/* Keep current WDELAY and RDELAY */
	mode |= (readl(owl_host->base + OWL_REG_SD_CTL) & OWL_SD_CTL_DELAY_MSK);

	/* Start to send corresponding command type */
	writel(cmd->arg, owl_host->base + OWL_REG_SD_ARG);
	writel(cmd->opcode, owl_host->base + OWL_REG_SD_CMD);

	/* Set LBE to send clk at the end of last read block */
	if (data) {
		mode |= (OWL_SD_CTL_TS | OWL_SD_CTL_LBE |
			 OWL_SD_CTL_TOUTCNT(0x64));
	} else {
		mode &= ~(OWL_SD_CTL_TOUTEN | OWL_SD_CTL_LBE);
		mode |= OWL_SD_CTL_TS;
	}

	owl_host->cmd = cmd;

	/* Start transfer */
	writel(mode, owl_host->base + OWL_REG_SD_CTL);

	if (data)
		return;

	timeout = msecs_to_jiffies(cmd->busy_timeout ? cmd->busy_timeout :
				   OWL_CMD_TIMEOUT_MS);

	if (!wait_for_completion_timeout(&owl_host->sdc_complete, timeout)) {
		dev_err(owl_host->dev, "CMD interrupt timeout\n");
		cmd->error = -ETIMEDOUT;
		return;
	}

	state = readl(owl_host->base + OWL_REG_SD_STATE);
	if (mmc_resp_type(cmd) & MMC_RSP_PRESENT) {
		if (cmd_rsp_mask & state) {
			if (state & OWL_SD_STATE_CLNR) {
				dev_err(owl_host->dev, "Error CMD_NO_RSP\n");
				cmd->error = -EILSEQ;
				return;
			}

			if (state & OWL_SD_STATE_CRC7ER) {
				dev_err(owl_host->dev, "Error CMD_RSP_CRC\n");
				cmd->error = -EILSEQ;
				return;
			}
		}

		if (mmc_resp_type(cmd) & MMC_RSP_136) {
			cmd->resp[3] = readl(owl_host->base + OWL_REG_SD_RSPBUF0);
			cmd->resp[2] = readl(owl_host->base + OWL_REG_SD_RSPBUF1);
			cmd->resp[1] = readl(owl_host->base + OWL_REG_SD_RSPBUF2);
			cmd->resp[0] = readl(owl_host->base + OWL_REG_SD_RSPBUF3);
		} else {
			resp[0] = readl(owl_host->base + OWL_REG_SD_RSPBUF0);
			resp[1] = readl(owl_host->base + OWL_REG_SD_RSPBUF1);
			cmd->resp[0] = resp[1] << 24 | resp[0] >> 8;
			cmd->resp[1] = resp[1] >> 8;
		}
	}
}

static void owl_mmc_dma_complete(void *param)
{
	struct owl_mmc_host *owl_host = param;
	struct mmc_data *data = owl_host->data;

	if (data)
		complete(&owl_host->dma_complete);
}

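/*
 * Prepare a data transfer: set OWL_SD_EN_BSEL, program the block count/size
 * and the internal buffer size, map the scatterlist and build a slave-sg
 * descriptor on the external DMA channel.  The descriptor is submitted later
 * from owl_mmc_request().
 */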
static int owl_mmc_prepare_data(struct owl_mmc_host *owl_host,
				struct mmc_data *data)
{
	u32 total;

	owl_mmc_update_reg(owl_host->base + OWL_REG_SD_EN, OWL_SD_EN_BSEL,
			   true);
	writel(data->blocks, owl_host->base + OWL_REG_SD_BLK_NUM);
	writel(data->blksz, owl_host->base + OWL_REG_SD_BLK_SIZE);
	total = data->blksz * data->blocks;

	if (total < 512)
		writel(total, owl_host->base + OWL_REG_SD_BUF_SIZE);
	else
		writel(512, owl_host->base + OWL_REG_SD_BUF_SIZE);

	if (data->flags & MMC_DATA_WRITE) {
		owl_host->dma_dir = DMA_TO_DEVICE;
		owl_host->dma_cfg.direction = DMA_MEM_TO_DEV;
	} else {
		owl_host->dma_dir = DMA_FROM_DEVICE;
		owl_host->dma_cfg.direction = DMA_DEV_TO_MEM;
	}

	dma_map_sg(owl_host->dma->device->dev, data->sg,
		   data->sg_len, owl_host->dma_dir);

	dmaengine_slave_config(owl_host->dma, &owl_host->dma_cfg);
	owl_host->desc = dmaengine_prep_slave_sg(owl_host->dma, data->sg,
						 data->sg_len,
						 owl_host->dma_cfg.direction,
						 DMA_PREP_INTERRUPT |
						 DMA_CTRL_ACK);
	if (!owl_host->desc) {
		dev_err(owl_host->dev, "Can't prepare slave sg\n");
		return -EBUSY;
	}

	owl_host->data = data;

	owl_host->desc->callback = owl_mmc_dma_complete;
	owl_host->desc->callback_param = (void *)owl_host;
	data->error = 0;

	return 0;
}

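/*
 * Top-level request handler.  For data requests the DMA descriptor is
 * submitted first and the command is issued afterwards; the function then
 * waits for both the controller's transfer-end interrupt and the DMA
 * completion before sending any stop command and finishing the request.
 */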
static void owl_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct owl_mmc_host *owl_host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;
	int ret;

	owl_host->mrq = mrq;
	if (mrq->data) {
		ret = owl_mmc_prepare_data(owl_host, data);
		if (ret < 0) {
			data->error = ret;
			goto err_out;
		}

		init_completion(&owl_host->dma_complete);
		dmaengine_submit(owl_host->desc);
		dma_async_issue_pending(owl_host->dma);
	}

	owl_mmc_send_cmd(owl_host, mrq->cmd, data);

	if (data) {
		if (!wait_for_completion_timeout(&owl_host->sdc_complete,
						 10 * HZ)) {
			dev_err(owl_host->dev, "CMD interrupt timeout\n");
			mrq->cmd->error = -ETIMEDOUT;
			dmaengine_terminate_all(owl_host->dma);
			goto err_out;
		}

		if (!wait_for_completion_timeout(&owl_host->dma_complete,
						 5 * HZ)) {
			dev_err(owl_host->dev, "DMA interrupt timeout\n");
			mrq->cmd->error = -ETIMEDOUT;
			dmaengine_terminate_all(owl_host->dma);
			goto err_out;
		}

		if (data->stop)
			owl_mmc_send_cmd(owl_host, data->stop, NULL);

		data->bytes_xfered = data->blocks * data->blksz;
	}

err_out:
	owl_mmc_finish_request(owl_host);
}

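/*
 * Pick read/write delay-chain values that match the requested bus clock and
 * then program the module clock.  The module clock is requested at twice the
 * bus rate (rate << 1), which suggests the controller divides it by two
 * internally.
 */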
static int owl_mmc_set_clk_rate(struct owl_mmc_host *owl_host,
				unsigned int rate)
{
	unsigned long clk_rate;
	int ret;
	u32 reg;

	reg = readl(owl_host->base + OWL_REG_SD_CTL);
	reg &= ~OWL_SD_CTL_DELAY_MSK;

	/* Set RDELAY and WDELAY based on the clock */
	if (rate <= 1000000) {
		writel(reg | OWL_SD_CTL_RDELAY(OWL_SD_DELAY_LOW_CLK) |
		       OWL_SD_CTL_WDELAY(OWL_SD_DELAY_LOW_CLK),
		       owl_host->base + OWL_REG_SD_CTL);
	} else if ((rate > 1000000) && (rate <= 26000000)) {
		writel(reg | OWL_SD_CTL_RDELAY(OWL_SD_DELAY_MID_CLK) |
		       OWL_SD_CTL_WDELAY(OWL_SD_DELAY_MID_CLK),
		       owl_host->base + OWL_REG_SD_CTL);
	} else if ((rate > 26000000) && (rate <= 52000000) && !owl_host->ddr_50) {
		writel(reg | OWL_SD_CTL_RDELAY(OWL_SD_DELAY_HIGH_CLK) |
		       OWL_SD_CTL_WDELAY(OWL_SD_DELAY_HIGH_CLK),
		       owl_host->base + OWL_REG_SD_CTL);
	/* DDR50 mode has special delay chain */
	} else if ((rate > 26000000) && (rate <= 52000000) && owl_host->ddr_50) {
		writel(reg | OWL_SD_CTL_RDELAY(OWL_SD_RDELAY_DDR50) |
		       OWL_SD_CTL_WDELAY(OWL_SD_WDELAY_DDR50),
		       owl_host->base + OWL_REG_SD_CTL);
	} else {
		dev_err(owl_host->dev, "SD clock rate not supported\n");
		return -EINVAL;
	}

	clk_rate = clk_round_rate(owl_host->clk, rate << 1);
	ret = clk_set_rate(owl_host->clk, clk_rate);

	return ret;
}

static void owl_mmc_set_clk(struct owl_mmc_host *owl_host, struct mmc_ios *ios)
{
	if (!ios->clock)
		return;

	owl_host->clock = ios->clock;
	owl_mmc_set_clk_rate(owl_host, ios->clock);
}

static void owl_mmc_set_bus_width(struct owl_mmc_host *owl_host,
				  struct mmc_ios *ios)
{
	u32 reg;

	reg = readl(owl_host->base + OWL_REG_SD_EN);
	reg &= ~0x03;
	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		break;
	case MMC_BUS_WIDTH_4:
		reg |= OWL_SD_EN_DATAWID(1);
		break;
	case MMC_BUS_WIDTH_8:
		reg |= OWL_SD_EN_DATAWID(2);
		break;
	}

	writel(reg, owl_host->base + OWL_REG_SD_EN);
}

static void owl_mmc_ctr_reset(struct owl_mmc_host *owl_host)
{
	reset_control_assert(owl_host->reset);
	udelay(20);
	reset_control_deassert(owl_host->reset);
}

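/*
 * Card "power on" for this controller just means clocking the card: enable
 * the transfer-end interrupt, start the init-clock transfer mode and wait
 * for it to complete.
 */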
static void owl_mmc_power_on(struct owl_mmc_host *owl_host)
{
	u32 mode;

	init_completion(&owl_host->sdc_complete);

	/* Enable transfer end IRQ */
	owl_mmc_update_reg(owl_host->base + OWL_REG_SD_STATE,
			   OWL_SD_STATE_TEIE, true);

	/* Send init clk */
	mode = (readl(owl_host->base + OWL_REG_SD_CTL) & OWL_SD_CTL_DELAY_MSK);
	mode |= OWL_SD_CTL_TS | OWL_SD_CTL_TCN(5) | OWL_SD_CTL_TM(8);
	writel(mode, owl_host->base + OWL_REG_SD_CTL);

	if (!wait_for_completion_timeout(&owl_host->sdc_complete, HZ)) {
		dev_err(owl_host->dev, "CMD interrupt timeout\n");
		return;
	}
}

static void owl_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct owl_mmc_host *owl_host = mmc_priv(mmc);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		dev_dbg(owl_host->dev, "Powering card up\n");

		/* Reset the SDC controller to clear all previous states */
		owl_mmc_ctr_reset(owl_host);
		clk_prepare_enable(owl_host->clk);
		writel(OWL_SD_ENABLE | OWL_SD_EN_RESE,
		       owl_host->base + OWL_REG_SD_EN);

		break;

	case MMC_POWER_ON:
		dev_dbg(owl_host->dev, "Powering card on\n");
		owl_mmc_power_on(owl_host);

		break;

	case MMC_POWER_OFF:
		dev_dbg(owl_host->dev, "Powering card off\n");
		clk_disable_unprepare(owl_host->clk);

		return;

	default:
		dev_dbg(owl_host->dev, "Ignoring unknown card power state\n");
		break;
	}

	if (ios->clock != owl_host->clock)
		owl_mmc_set_clk(owl_host, ios);

	owl_mmc_set_bus_width(owl_host, ios);

	/* Enable DDR mode if requested */
	if (ios->timing == MMC_TIMING_UHS_DDR50) {
		owl_host->ddr_50 = true;
		owl_mmc_update_reg(owl_host->base + OWL_REG_SD_EN,
				   OWL_SD_EN_DDREN, true);
	} else {
		owl_host->ddr_50 = false;
	}
}

static int owl_mmc_start_signal_voltage_switch(struct mmc_host *mmc,
					       struct mmc_ios *ios)
{
	struct owl_mmc_host *owl_host = mmc_priv(mmc);

	/* It is enough to change the pad ctrl bit for voltage switch */
	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		owl_mmc_update_reg(owl_host->base + OWL_REG_SD_EN,
				   OWL_SD_EN_S18EN, false);
		break;
	case MMC_SIGNAL_VOLTAGE_180:
		owl_mmc_update_reg(owl_host->base + OWL_REG_SD_EN,
				   OWL_SD_EN_S18EN, true);
		break;
	default:
		return -ENOTSUPP;
	}

	return 0;
}

static const struct mmc_host_ops owl_mmc_ops = {
	.request	= owl_mmc_request,
	.set_ios	= owl_mmc_set_ios,
	.get_ro		= mmc_gpio_get_ro,
	.get_cd		= mmc_gpio_get_cd,
	.start_signal_voltage_switch = owl_mmc_start_signal_voltage_switch,
};

static int owl_mmc_probe(struct platform_device *pdev)
{
	struct owl_mmc_host *owl_host;
	struct mmc_host *mmc;
	struct resource *res;
	int ret;

	mmc = mmc_alloc_host(sizeof(struct owl_mmc_host), &pdev->dev);
	if (!mmc) {
		dev_err(&pdev->dev, "mmc alloc host failed\n");
		return -ENOMEM;
	}
	platform_set_drvdata(pdev, mmc);

	owl_host = mmc_priv(mmc);
	owl_host->dev = &pdev->dev;
	owl_host->mmc = mmc;
	spin_lock_init(&owl_host->lock);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	owl_host->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(owl_host->base)) {
		dev_err(&pdev->dev, "Failed to remap registers\n");
		ret = PTR_ERR(owl_host->base);
		goto err_free_host;
	}

	owl_host->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(owl_host->clk)) {
		dev_err(&pdev->dev, "No clock defined\n");
		ret = PTR_ERR(owl_host->clk);
		goto err_free_host;
	}

	owl_host->reset = devm_reset_control_get_exclusive(&pdev->dev, NULL);
	if (IS_ERR(owl_host->reset)) {
		dev_err(&pdev->dev, "Could not get reset control\n");
		ret = PTR_ERR(owl_host->reset);
		goto err_free_host;
	}

	mmc->ops = &owl_mmc_ops;
	mmc->max_blk_count = 512;
	mmc->max_blk_size = 512;
	mmc->max_segs = 256;
	mmc->max_seg_size = 262144;
	mmc->max_req_size = 262144;
	/* 100kHz ~ 52MHz */
	mmc->f_min = 100000;
	mmc->f_max = 52000000;
	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
		     MMC_CAP_4_BIT_DATA;
	mmc->caps2 = (MMC_CAP2_BOOTPART_NOACC | MMC_CAP2_NO_SDIO);
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34 |
			 MMC_VDD_165_195;

	ret = mmc_of_parse(mmc);
	if (ret)
		goto err_free_host;

	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
	owl_host->dma = dma_request_chan(&pdev->dev, "mmc");
	if (IS_ERR(owl_host->dma)) {
		dev_err(owl_host->dev, "Failed to get external DMA channel.\n");
		ret = PTR_ERR(owl_host->dma);
		goto err_free_host;
	}

	dev_info(&pdev->dev, "Using %s for DMA transfers\n",
		 dma_chan_name(owl_host->dma));

	owl_host->dma_cfg.src_addr = res->start + OWL_REG_SD_DAT;
	owl_host->dma_cfg.dst_addr = res->start + OWL_REG_SD_DAT;
	owl_host->dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	owl_host->dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	owl_host->dma_cfg.device_fc = false;

	owl_host->irq = platform_get_irq(pdev, 0);
	if (owl_host->irq < 0) {
		ret = -EINVAL;
		goto err_release_channel;
	}

	ret = devm_request_irq(&pdev->dev, owl_host->irq, owl_irq_handler,
			       0, dev_name(&pdev->dev), owl_host);
	if (ret) {
		dev_err(&pdev->dev, "Failed to request irq %d\n",
			owl_host->irq);
		goto err_release_channel;
	}

	ret = mmc_add_host(mmc);
	if (ret) {
		dev_err(&pdev->dev, "Failed to add host\n");
		goto err_release_channel;
	}

	dev_dbg(&pdev->dev, "Owl MMC Controller Initialized\n");

	return 0;

err_release_channel:
	dma_release_channel(owl_host->dma);
err_free_host:
	mmc_free_host(mmc);

	return ret;
}

static int owl_mmc_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct owl_mmc_host *owl_host = mmc_priv(mmc);

	mmc_remove_host(mmc);
	disable_irq(owl_host->irq);
	dma_release_channel(owl_host->dma);
	mmc_free_host(mmc);

	return 0;
}

static const struct of_device_id owl_mmc_of_match[] = {
	{ .compatible = "actions,owl-mmc", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, owl_mmc_of_match);

static struct platform_driver owl_mmc_driver = {
	.driver = {
		.name = "owl_mmc",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = owl_mmc_of_match,
	},
	.probe = owl_mmc_probe,
	.remove = owl_mmc_remove,
};
module_platform_driver(owl_mmc_driver);

MODULE_DESCRIPTION("Actions Semi Owl SoCs SD/MMC Driver");
MODULE_AUTHOR("Actions Semi");
MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
MODULE_LICENSE("GPL");