// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Synopsys DesignWare Multimedia Card Interface driver
 * (Based on NXP driver for lpc 31xx)
 *
 * Copyright (C) 2009 NXP Semiconductors
 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
 */

#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/sdio.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/soc/rockchip/rk_sdmmc.h>
#include <linux/soc/rockchip/rockchip_decompress.h>

#include "dw_mmc.h"

/* Common flag combinations */
#define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
				 SDMMC_INT_HTO | SDMMC_INT_SBE | \
				 SDMMC_INT_EBE | SDMMC_INT_HLE)
#define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
				 SDMMC_INT_RESP_ERR | SDMMC_INT_HLE)
#define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \
				 DW_MCI_CMD_ERROR_FLAGS)
#define DW_MCI_SEND_STATUS	1
#define DW_MCI_RECV_STATUS	2
#define DW_MCI_DMA_THRESHOLD	16

#define DW_MCI_FREQ_MAX	200000000	/* unit: Hz */
#define DW_MCI_FREQ_MIN	100000		/* unit: Hz */
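/*
 * Note: absent platform/DT overrides, these bounds are what dw_mmc typically
 * uses for mmc->f_min and mmc->f_max when the slot is initialized (an
 * assumption based on common dw_mmc behaviour; the slot init code is outside
 * this excerpt).
 */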

#define IDMAC_INT_CLR		(SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
				 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
				 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
				 SDMMC_IDMAC_INT_TI)

#define DESC_RING_BUF_SZ	PAGE_SIZE

struct idmac_desc_64addr {
	u32		des0;	/* Control Descriptor */
#define IDMAC_OWN_CLR64(x) \
	!((x) & cpu_to_le32(IDMAC_DES0_OWN))

	u32		des1;	/* Reserved */

	u32		des2;	/* Buffer sizes */
#define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \
	((d)->des2 = ((d)->des2 & cpu_to_le32(0x03ffe000)) | \
	 ((cpu_to_le32(s)) & cpu_to_le32(0x1fff)))
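	/*
	 * Field layout, as implied by the masks above: buffer 1 size lives
	 * in bits [12:0] (0x1fff) and buffer 2 size in bits [25:13]
	 * (0x03ffe000), so the macro rewrites BS1 while preserving BS2.
	 * Each buffer is therefore capped at 8191 bytes, which is why
	 * transfers are split into 4 KiB chunks further below
	 * (DW_MCI_DESC_DATA_LENGTH).
	 */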

	u32		des3;	/* Reserved */

	u32		des4;	/* Lower 32-bits of Buffer Address Pointer 1 */
	u32		des5;	/* Upper 32-bits of Buffer Address Pointer 1 */

	u32		des6;	/* Lower 32-bits of Next Descriptor Address */
	u32		des7;	/* Upper 32-bits of Next Descriptor Address */
};

struct idmac_desc {
	__le32		des0;	/* Control Descriptor */
#define IDMAC_DES0_DIC	BIT(1)
#define IDMAC_DES0_LD	BIT(2)
#define IDMAC_DES0_FD	BIT(3)
#define IDMAC_DES0_CH	BIT(4)
#define IDMAC_DES0_ER	BIT(5)
#define IDMAC_DES0_CES	BIT(30)
#define IDMAC_DES0_OWN	BIT(31)

	__le32		des1;	/* Buffer sizes */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & cpu_to_le32(0x03ffe000)) | (cpu_to_le32((s) & 0x1fff)))

	__le32		des2;	/* buffer 1 physical address */

	__le32		des3;	/* buffer 2 physical address */
};
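/*
 * In chained mode (IDMAC_DES0_CH set, which is how this driver builds its
 * ring) the IDMAC interprets des3 as the next-descriptor address rather
 * than a second buffer pointer; see the forward linking in
 * dw_mci_idmac_init() below.
 */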

/* Each descriptor can transfer up to 4KB of data in chained mode */
#define DW_MCI_DESC_DATA_LENGTH	0x1000
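/*
 * Rough ring capacity, assuming a 4 KiB PAGE_SIZE: 4096 / 16 = 256 32-bit
 * descriptors (or 4096 / 32 = 128 of the 64-bit variant), each carrying up
 * to 4 KiB, i.e. on the order of 1 MiB (512 KiB) per request.
 */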

#if IS_ENABLED(CONFIG_CPU_RV1106)
static spinlock_t *g_sdmmc_ispvicap_lock;
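/*
 * Presumably installed elsewhere in this driver (outside this excerpt) so
 * that RV1106 ISP/VICAP code can serialize against SD card accesses via the
 * two exported helpers below; treat the exact ownership as an assumption.
 */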

void rv1106_sdmmc_get_lock(void)
{
	if (g_sdmmc_ispvicap_lock)
		spin_lock(g_sdmmc_ispvicap_lock);
}
EXPORT_SYMBOL(rv1106_sdmmc_get_lock);

void rv1106_sdmmc_put_lock(void)
{
	if (g_sdmmc_ispvicap_lock)
		spin_unlock(g_sdmmc_ispvicap_lock);
}
EXPORT_SYMBOL(rv1106_sdmmc_put_lock);
#endif

#if defined(CONFIG_DEBUG_FS)
static int dw_mci_req_show(struct seq_file *s, void *v)
{
	struct dw_mci_slot *slot = s->private;
	struct mmc_request *mrq;
	struct mmc_command *cmd;
	struct mmc_command *stop;
	struct mmc_data *data;

	/* Make sure we get a consistent snapshot */
	spin_lock_bh(&slot->host->lock);
	mrq = slot->mrq;

	if (mrq) {
		cmd = mrq->cmd;
		data = mrq->data;
		stop = mrq->stop;

		if (cmd)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   cmd->opcode, cmd->arg, cmd->flags,
				   cmd->resp[0], cmd->resp[1], cmd->resp[2],
				   cmd->resp[3], cmd->error);
		if (data)
			seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
				   data->bytes_xfered, data->blocks,
				   data->blksz, data->flags, data->error);
		if (stop)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   stop->opcode, stop->arg, stop->flags,
				   stop->resp[0], stop->resp[1], stop->resp[2],
				   stop->resp[3], stop->error);
	}

	spin_unlock_bh(&slot->host->lock);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dw_mci_req);

static int dw_mci_regs_show(struct seq_file *s, void *v)
{
	struct dw_mci *host = s->private;

	pm_runtime_get_sync(host->dev);

	seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
	seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
	seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
	seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL));
	seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
	seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));

	pm_runtime_put_autosuspend(host->dev);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dw_mci_regs);

static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
{
	struct mmc_host *mmc = slot->mmc;
	struct dw_mci *host = slot->host;
	struct dentry *root;

	root = mmc->debugfs_root;
	if (!root)
		return;

	debugfs_create_file("regs", S_IRUSR, root, host, &dw_mci_regs_fops);
	debugfs_create_file("req", S_IRUSR, root, slot, &dw_mci_req_fops);
	debugfs_create_u32("state", S_IRUSR, root, &host->state);
	debugfs_create_xul("pending_events", S_IRUSR, root,
			   &host->pending_events);
	debugfs_create_xul("completed_events", S_IRUSR, root,
			   &host->completed_events);
}
#endif /* defined(CONFIG_DEBUG_FS) */

static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
{
	u32 ctrl;

	ctrl = mci_readl(host, CTRL);
	ctrl |= reset;
	mci_writel(host, CTRL, ctrl);

	/* wait till resets clear */
	if (readl_poll_timeout_atomic(host->regs + SDMMC_CTRL, ctrl,
				      !(ctrl & reset),
				      1, 500 * USEC_PER_MSEC)) {
		dev_err(host->dev,
			"Timeout resetting block (ctrl reset %#x)\n",
			ctrl & reset);
		return false;
	}

	return true;
}
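/*
 * Illustrative usage (not from this excerpt): callers typically request a
 * full block reset, e.g.
 *
 *	dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS);
 *
 * which ORs together the controller, FIFO and DMA reset bits, assuming the
 * SDMMC_CTRL_ALL_RESET_FLAGS convenience mask from dw_mmc.h.
 */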

static void dw_mci_wait_while_busy(struct dw_mci *host, u32 cmd_flags)
{
	u32 status;
	u32 delay = 10;

	/*
	 * Databook says that before issuing a new data transfer command
	 * we need to check to see if the card is busy. Data transfer commands
	 * all have SDMMC_CMD_PRV_DAT_WAIT set, so we'll key off that.
	 *
	 * ...also allow sending for SDMMC_CMD_VOLT_SWITCH where busy is
	 * expected.
	 */
#ifdef CONFIG_ROCKCHIP_THUNDER_BOOT
	if ((host->slot->mmc->caps2 & MMC_CAP2_NO_SD) &&
	    (host->slot->mmc->caps2 & MMC_CAP2_NO_SDIO))
		delay = 0;
#endif
	if ((cmd_flags & SDMMC_CMD_PRV_DAT_WAIT) &&
	    !(cmd_flags & SDMMC_CMD_VOLT_SWITCH)) {
		if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS,
					      status,
					      !(status & SDMMC_STATUS_BUSY),
					      delay, 500 * USEC_PER_MSEC))
			dev_err(host->dev, "Busy; trying anyway\n");
	}
}

static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
{
	struct dw_mci *host = slot->host;
	unsigned int cmd_status = 0;

	mci_writel(host, CMDARG, arg);
	wmb(); /* drain writebuffer */
	dw_mci_wait_while_busy(host, cmd);
	mci_writel(host, CMD, SDMMC_CMD_START | cmd);

	if (readl_poll_timeout_atomic(host->regs + SDMMC_CMD, cmd_status,
				      !(cmd_status & SDMMC_CMD_START),
				      1, 500 * USEC_PER_MSEC))
		dev_err(&slot->mmc->class_dev,
			"Timeout sending command (cmd %#x arg %#x status %#x)\n",
			cmd, arg, cmd_status);
}

static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	u32 cmdr;

	cmd->error = -EINPROGRESS;
	cmdr = cmd->opcode;

	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
	    cmd->opcode == MMC_GO_IDLE_STATE ||
	    cmd->opcode == MMC_GO_INACTIVE_STATE ||
	    (cmd->opcode == SD_IO_RW_DIRECT &&
	     ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
		cmdr |= SDMMC_CMD_STOP;
	else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;

	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		u32 clk_en_a;

		/* Special bit makes CMD11 not die */
		cmdr |= SDMMC_CMD_VOLT_SWITCH;

		/* Change state to continue to handle CMD11 weirdness */
		WARN_ON(slot->host->state != STATE_SENDING_CMD);
		slot->host->state = STATE_SENDING_CMD11;

		/*
		 * We need to disable low power mode (automatic clock stop)
		 * while doing voltage switch so we don't confuse the card,
		 * since stopping the clock is a specific part of the UHS
		 * voltage change dance.
		 *
		 * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be
		 * unconditionally turned back on in dw_mci_setup_bus() if it's
		 * ever called with a non-zero clock. That shouldn't happen
		 * until the voltage change is all done.
		 */
		clk_en_a = mci_readl(host, CLKENA);
		clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id);
		mci_writel(host, CLKENA, clk_en_a);
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
			     SDMMC_CMD_PRV_DAT_WAIT, 0);
	}

	if (cmd->flags & MMC_RSP_PRESENT) {
		/* We expect a response, so set this bit */
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;
	}

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

	if (cmd->data) {
		cmdr |= SDMMC_CMD_DAT_EXP;
		if (cmd->data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DAT_WR;
	}

	if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &slot->flags))
		cmdr |= SDMMC_CMD_USE_HOLD_REG;

	return cmdr;
}

static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
{
	struct mmc_command *stop;
	u32 cmdr;

	if (!cmd->data)
		return 0;

	stop = &host->stop_abort;
	cmdr = cmd->opcode;
	memset(stop, 0, sizeof(struct mmc_command));

	if (cmdr == MMC_READ_SINGLE_BLOCK ||
	    cmdr == MMC_READ_MULTIPLE_BLOCK ||
	    cmdr == MMC_WRITE_BLOCK ||
	    cmdr == MMC_WRITE_MULTIPLE_BLOCK ||
	    cmdr == MMC_SEND_TUNING_BLOCK ||
	    cmdr == MMC_SEND_TUNING_BLOCK_HS200) {
		stop->opcode = MMC_STOP_TRANSMISSION;
		stop->arg = 0;
		stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	} else if (cmdr == SD_IO_RW_EXTENDED) {
		stop->opcode = SD_IO_RW_DIRECT;
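		/*
		 * CMD52 argument layout (SDIO spec): bit 31 = R/W flag,
		 * bits [30:28] = function number (0 here, i.e. the CCCR),
		 * bits [25:9] = register address (SDIO_CCCR_ABORT), and the
		 * low byte = data, which carries the number of the function
		 * being aborted (taken from bits [30:28] of the original
		 * CMD53 argument).
		 */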
		stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
			     ((cmd->arg >> 28) & 0x7);
		stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
	} else {
		return 0;
	}

	cmdr = stop->opcode | SDMMC_CMD_STOP |
	       SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;

	if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &host->slot->flags))
		cmdr |= SDMMC_CMD_USE_HOLD_REG;

	return cmdr;
}

static inline void dw_mci_set_cto(struct dw_mci *host)
{
	unsigned int cto_clks;
	unsigned int cto_div;
	unsigned int cto_ms;
	unsigned long irqflags;

	cto_clks = mci_readl(host, TMOUT) & 0xff;
	cto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
	if (cto_div == 0)
		cto_div = 1;

	cto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * cto_clks * cto_div,
				  host->bus_hz);

	/* add a bit of spare time */
	cto_ms += 10;
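	/*
	 * Worked example (illustrative numbers only): with bus_hz = 50 MHz,
	 * cto_clks = 255 and cto_div = 2, the raw timeout is
	 * 255 * 2 / 50e6 s ~= 10.2 us, which DIV_ROUND_UP_ULL turns into
	 * 1 ms, so the timer below ends up armed roughly 11 ms out.
	 */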

	/*
	 * The durations we're working with are fairly short so we have to be
	 * extra careful about synchronization here. Specifically in hardware a
	 * command timeout is _at most_ 5.1 ms, so that means we expect an
	 * interrupt (either command done or timeout) to come rather quickly
	 * after the mci_writel. ...but just in case we have a long interrupt
	 * latency let's add a bit of paranoia.
	 *
	 * In general we'll assume that at least an interrupt will be asserted
	 * in hardware by the time the cto_timer runs. ...and if it hasn't
	 * been asserted in hardware by that time then we'll assume it'll never
	 * come.
	 */
	spin_lock_irqsave(&host->irq_lock, irqflags);
	if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
		mod_timer(&host->cto_timer,
			  jiffies + msecs_to_jiffies(cto_ms) + 1);
	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}

static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
{
	host->cmd = cmd;
	dev_vdbg(host->dev,
		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
		 cmd->arg, cmd_flags);

	mci_writel(host, CMDARG, cmd->arg);
	wmb(); /* drain writebuffer */
	dw_mci_wait_while_busy(host, cmd_flags);

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);

	/* response expected command only */
	if (cmd_flags & SDMMC_CMD_RESP_EXP)
		dw_mci_set_cto(host);
}

static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
{
	struct mmc_command *stop = &host->stop_abort;

	dw_mci_start_command(host, stop, host->stop_cmdr);
}

/* DMA interface functions */
static void dw_mci_stop_dma(struct dw_mci *host)
{
	if (host->using_dma) {
		host->dma_ops->stop(host);
		host->dma_ops->cleanup(host);
	}

	/* Data transfer was stopped by the interrupt handler */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

static void dw_mci_dma_cleanup(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	if (data && data->host_cookie == COOKIE_MAPPED) {
		dma_unmap_sg(host->dev,
			     data->sg,
			     data->sg_len,
			     mmc_get_dma_dir(data));
		data->host_cookie = COOKIE_UNMAPPED;
	}
}

static void dw_mci_idmac_reset(struct dw_mci *host)
{
	u32 bmod = mci_readl(host, BMOD);
	/* Software reset of DMA */
	bmod |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, bmod);
}

static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
	u32 temp;

	/* Disable and reset the IDMAC interface */
	temp = mci_readl(host, CTRL);
	if (!host->is_rv1106_sd)
		temp &= ~SDMMC_CTRL_USE_IDMAC;

	temp |= SDMMC_CTRL_DMA_RESET;
	mci_writel(host, CTRL, temp);

	/* Stop the IDMAC running */
	temp = mci_readl(host, BMOD);
	if (host->is_rv1106_sd) {
		temp |= SDMMC_IDMAC_SWRESET;
	} else {
		temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
		temp |= SDMMC_IDMAC_SWRESET;
	}
	mci_writel(host, BMOD, temp);
}

static void dw_mci_dmac_complete_dma(void *arg)
{
	struct dw_mci *host = arg;
	struct mmc_data *data = host->data;

	dev_vdbg(host->dev, "DMA complete\n");

	if ((host->use_dma == TRANS_MODE_EDMAC) &&
	    data && (data->flags & MMC_DATA_READ))
		/* Invalidate cache after read */
		dma_sync_sg_for_cpu(mmc_dev(host->slot->mmc),
				    data->sg,
				    data->sg_len,
				    DMA_FROM_DEVICE);

	host->dma_ops->cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
	 */
	if (data) {
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	}

	if (host->need_xfer_timer &&
	    host->dir_status == DW_MCI_RECV_STATUS)
		del_timer(&host->xfer_timer);
}

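/*
 * dw_mci_idmac_init() lays the descriptors out as a fixed ring: every
 * descriptor's "next" pointer is forward-linked to its neighbour and the
 * last one carries IDMAC_DES0_ER and points back at the base, roughly:
 *
 *	desc[0] -> desc[1] -> ... -> desc[N-1] (ER)
 *	   ^--------------------------------+
 */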
static int dw_mci_idmac_init(struct dw_mci *host)
{
	int i;

	if (host->dma_64bit_address == 1) {
		struct idmac_desc_64addr *p;
		/* Number of descriptors in the ring buffer */
		host->ring_size =
			DESC_RING_BUF_SZ / sizeof(struct idmac_desc_64addr);

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
		     i++, p++) {
			p->des6 = (host->sg_dma +
				   (sizeof(struct idmac_desc_64addr) *
				    (i + 1))) & 0xffffffff;

			p->des7 = (u64)(host->sg_dma +
					(sizeof(struct idmac_desc_64addr) *
					 (i + 1))) >> 32;
			/* Initialize reserved and buffer size fields to "0" */
			p->des0 = 0;
			p->des1 = 0;
			p->des2 = 0;
			p->des3 = 0;
		}

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des6 = host->sg_dma & 0xffffffff;
		p->des7 = (u64)host->sg_dma >> 32;
		p->des0 = IDMAC_DES0_ER;

	} else {
		struct idmac_desc *p;
		/* Number of descriptors in the ring buffer */
		host->ring_size =
			DESC_RING_BUF_SZ / sizeof(struct idmac_desc);

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu;
		     i < host->ring_size - 1;
		     i++, p++) {
			p->des3 = cpu_to_le32(host->sg_dma +
					(sizeof(struct idmac_desc) * (i + 1)));
			p->des0 = 0;
			p->des1 = 0;
		}

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des3 = cpu_to_le32(host->sg_dma);
		p->des0 = cpu_to_le32(IDMAC_DES0_ER);
	}

	dw_mci_idmac_reset(host);

	if (host->dma_64bit_address == 1) {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS64, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
			   SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
		mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);

	} else {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
			   SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDR, host->sg_dma);
	}

	return 0;
}

static inline int dw_mci_prepare_desc64(struct dw_mci *host,
					struct mmc_data *data,
					unsigned int sg_len)
{
	unsigned int desc_len;
	struct idmac_desc_64addr *desc_first, *desc_last, *desc;
	u32 val;
	int i;

	desc_first = desc_last = desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++) {
		unsigned int length = sg_dma_len(&data->sg[i]);

		u64 mem_addr = sg_dma_address(&data->sg[i]);

		for ( ; length ; desc++) {
			desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
				   length : DW_MCI_DESC_DATA_LENGTH;

			length -= desc_len;

			/*
			 * Wait for the previous clear-OWN-bit operation of
			 * the IDMAC to finish, to make sure this descriptor
			 * is no longer owned by the IDMAC; the IDMAC's
			 * writes and the CPU's reads are asynchronous.
			 */
			if (readl_poll_timeout_atomic(&desc->des0, val,
						      !(val & IDMAC_DES0_OWN),
						      10, 100 * USEC_PER_MSEC))
				goto err_own_bit;

			/*
			 * Set the OWN bit and disable interrupts
			 * for this descriptor
			 */
			desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
				     IDMAC_DES0_CH;

			/* Buffer length */
			IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len);

			/* Physical address to DMA to/from */
			desc->des4 = mem_addr & 0xffffffff;
			desc->des5 = mem_addr >> 32;

			/* Update physical address for the next desc */
			mem_addr += desc_len;

			/* Save pointer to the last descriptor */
			desc_last = desc;
		}
	}

	/* Set first descriptor */
	desc_first->des0 |= IDMAC_DES0_FD;

	/* Set last descriptor */
	desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
	desc_last->des0 |= IDMAC_DES0_LD;

	return 0;
err_own_bit:
	/* The descriptor chain is polluted; reinitialize it */
	dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
	memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
	dw_mci_idmac_init(host);
	return -EINVAL;
}

static inline int dw_mci_prepare_desc32(struct dw_mci *host,
					struct mmc_data *data,
					unsigned int sg_len)
{
	unsigned int desc_len;
	struct idmac_desc *desc_first, *desc_last, *desc;
	u32 val;
	int i;

	desc_first = desc_last = desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++) {
		unsigned int length = sg_dma_len(&data->sg[i]);

		u32 mem_addr = sg_dma_address(&data->sg[i]);

		for ( ; length ; desc++) {
			desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
				   length : DW_MCI_DESC_DATA_LENGTH;

			length -= desc_len;

			/*
			 * Wait for the previous clear-OWN-bit operation of
			 * the IDMAC to finish, to make sure this descriptor
			 * is no longer owned by the IDMAC; the IDMAC's
			 * writes and the CPU's reads are asynchronous.
			 */
			if (readl_poll_timeout_atomic(&desc->des0, val,
						      IDMAC_OWN_CLR64(val),
						      10,
						      100 * USEC_PER_MSEC))
				goto err_own_bit;

			/*
			 * Set the OWN bit and disable interrupts
			 * for this descriptor
			 */
			desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
						 IDMAC_DES0_DIC |
						 IDMAC_DES0_CH);

			/* Buffer length */
			IDMAC_SET_BUFFER1_SIZE(desc, desc_len);

			/* Physical address to DMA to/from */
			desc->des2 = cpu_to_le32(mem_addr);

			/* Update physical address for the next desc */
			mem_addr += desc_len;

			/* Save pointer to the last descriptor */
			desc_last = desc;
		}
	}

	if (host->is_rv1106_sd && (data->flags & MMC_DATA_WRITE)) {
		desc->des0 = desc_last->des0;
		desc->des2 = desc_last->des2;
		desc->des1 = 0x8; /* Arbitrary dirty size for the extra trailing desc */
		desc_last = desc;
	}

	/* Set first descriptor */
	desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD);

	/* Set last descriptor */
	desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
					 IDMAC_DES0_DIC));
	desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);

	return 0;
err_own_bit:
	/* The descriptor chain is polluted; reinitialize it */
	dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
	memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
	dw_mci_idmac_init(host);
	return -EINVAL;
}

static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
	u32 temp;
	int ret;

	if (host->dma_64bit_address == 1)
		ret = dw_mci_prepare_desc64(host, host->data, sg_len);
	else
		ret = dw_mci_prepare_desc32(host, host->data, sg_len);

	if (ret)
		goto out;

	/* drain writebuffer */
	wmb();

	/* Make sure to reset DMA in case we did PIO before this */
	dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);
	dw_mci_idmac_reset(host);

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	/* drain writebuffer */
	wmb();

	/* Enable the IDMAC */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running */
	mci_writel(host, PLDMND, 1);

out:
	return ret;
}

static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
	.init = dw_mci_idmac_init,
	.start = dw_mci_idmac_start_dma,
	.stop = dw_mci_idmac_stop_dma,
	.complete = dw_mci_dmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};

static void dw_mci_edmac_stop_dma(struct dw_mci *host)
{
	dmaengine_terminate_async(host->dms->ch);
}

static int dw_mci_edmac_start_dma(struct dw_mci *host,
				  unsigned int sg_len)
{
	struct dma_slave_config cfg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct scatterlist *sgl = host->data->sg;
	static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 sg_elems = host->data->sg_len;
	u32 fifoth_val;
	u32 fifo_offset = host->fifo_reg - host->regs;
	int ret = 0;

	/* Set external dma config: burst size, burst width */
	memset(&cfg, 0, sizeof(cfg));
	cfg.dst_addr = host->phy_regs + fifo_offset;
	cfg.src_addr = cfg.dst_addr;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	/* Match burst msize with external dma config */
	fifoth_val = mci_readl(host, FIFOTH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) cfg.dst_maxburst = mszs[(fifoth_val >> 28) & 0x7];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) cfg.src_maxburst = cfg.dst_maxburst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) if (host->data->flags & MMC_DATA_WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) cfg.direction = DMA_MEM_TO_DEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) cfg.direction = DMA_DEV_TO_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) ret = dmaengine_slave_config(host->dms->ch, &cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) dev_err(host->dev, "Failed to config edmac.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) desc = dmaengine_prep_slave_sg(host->dms->ch, sgl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) sg_len, cfg.direction,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) if (!desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) dev_err(host->dev, "Can't prepare slave sg.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) /* Set dw_mci_dmac_complete_dma as callback */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) desc->callback = dw_mci_dmac_complete_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) desc->callback_param = (void *)host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) dmaengine_submit(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) /* Flush cache before write */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) if (host->data->flags & MMC_DATA_WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) dma_sync_sg_for_device(mmc_dev(host->slot->mmc), sgl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) sg_elems, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) dma_async_issue_pending(host->dms->ch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) }
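
/*
 * Worked example for the burst-size match above: if FIFOTH was programmed
 * with MSIZE = 3 in bits [30:28], then
 *
 *	(fifoth_val >> 28) & 0x7  ->  3
 *	mszs[3]                   ->  16 transfers per burst
 *
 * so the external DMA channel is configured with src/dst_maxburst = 16,
 * matching the burst length the host FIFO expects.
 */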
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) static int dw_mci_edmac_init(struct dw_mci *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) /* Request external dma channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) host->dms = kzalloc(sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) if (!host->dms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) host->dms->ch = dma_request_chan(host->dev, "rx-tx");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) if (IS_ERR(host->dms->ch)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) int ret = PTR_ERR(host->dms->ch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) dev_err(host->dev, "Failed to get external DMA channel.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) kfree(host->dms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) host->dms = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) }
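
/*
 * The "rx-tx" name above is resolved via the device tree; a sketch of the
 * expected binding (node and phandle names are illustrative):
 *
 *	&emmc {
 *		dmas = <&dmac 12>;
 *		dma-names = "rx-tx";
 *	};
 */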
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) static void dw_mci_edmac_exit(struct dw_mci *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) if (host->dms) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) if (host->dms->ch) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) dma_release_channel(host->dms->ch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) host->dms->ch = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) kfree(host->dms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) host->dms = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) .init = dw_mci_edmac_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) .exit = dw_mci_edmac_exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) .start = dw_mci_edmac_start_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) .stop = dw_mci_edmac_stop_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) .complete = dw_mci_dmac_complete_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) .cleanup = dw_mci_dma_cleanup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) static int dw_mci_pre_dma_transfer(struct dw_mci *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) struct mmc_data *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) int cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) unsigned int i, sg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) if (data->host_cookie == COOKIE_PRE_MAPPED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) return data->sg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) * We don't do DMA on "complex" transfers, i.e. with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) * non-word-aligned buffers or lengths. Also, we don't bother
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) * with all the DMA setup overhead for short transfers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD && !host->is_rv1106_sd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) if (data->blksz & 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) for_each_sg(data->sg, sg, data->sg_len, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) if (sg->offset & 3 || sg->length & 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) sg_len = dma_map_sg(host->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) data->sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) data->sg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) mmc_get_dma_dir(data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) if (sg_len == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) data->host_cookie = cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) return sg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) }
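
/*
 * Example of the alignment rules enforced above (values are illustrative):
 *
 *	sg->offset = 0x100, sg->length = 512  ->  mapped for DMA
 *	sg->offset = 0x101, sg->length = 512  ->  -EINVAL ((0x101 & 3) != 0),
 *						  the caller falls back to PIO
 */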
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) static void dw_mci_pre_req(struct mmc_host *mmc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) struct mmc_request *mrq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) struct dw_mci_slot *slot = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) struct mmc_data *data = mrq->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) if (!slot->host->use_dma || !data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) /* This data might be unmapped at this time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) data->host_cookie = COOKIE_UNMAPPED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) if (dw_mci_pre_dma_transfer(slot->host, mrq->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) COOKIE_PRE_MAPPED) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) data->host_cookie = COOKIE_UNMAPPED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) static void dw_mci_post_req(struct mmc_host *mmc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) struct mmc_request *mrq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) int err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) struct dw_mci_slot *slot = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) struct mmc_data *data = mrq->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) if (!slot->host->use_dma || !data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) if (data->host_cookie != COOKIE_UNMAPPED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) dma_unmap_sg(slot->host->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) data->sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) data->sg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) mmc_get_dma_dir(data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) data->host_cookie = COOKIE_UNMAPPED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) }
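
/*
 * Cookie lifecycle across the three routines above:
 *
 *	COOKIE_UNMAPPED    - set in pre_req before mapping is attempted
 *	COOKIE_PRE_MAPPED  - mapped ahead of time by pre_req
 *	COOKIE_MAPPED      - mapped on demand by dw_mci_submit_data_dma
 *
 * post_req unmaps anything that is not COOKIE_UNMAPPED and then resets the
 * cookie, so a buffer is never unmapped twice.
 */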
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) static int dw_mci_get_cd(struct mmc_host *mmc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) int present;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) struct dw_mci_slot *slot = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) struct dw_mci *host = slot->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) int gpio_cd = mmc_gpio_get_cd(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) /* Use platform get_cd function, else try onboard card detect */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) if ((mmc->caps & MMC_CAP_NEEDS_POLL) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) !mmc_card_is_removable(mmc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) present = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) if (mmc->caps & MMC_CAP_NEEDS_POLL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) dev_info(&mmc->class_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) "card is polling.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) dev_info(&mmc->class_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) "card is non-removable.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) return present;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) } else if (gpio_cd >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) present = gpio_cd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) present = !(mci_readl(slot->host, CDETECT) & (1 << slot->id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) spin_lock_bh(&host->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) if (present && !test_and_set_bit(DW_MMC_CARD_PRESENT, &slot->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) dev_dbg(&mmc->class_dev, "card is present\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) else if (!present &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) !test_and_clear_bit(DW_MMC_CARD_PRESENT, &slot->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) dev_dbg(&mmc->class_dev, "card is not present\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) spin_unlock_bh(&host->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) return present;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) unsigned int blksz = data->blksz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) u32 fifo_width = 1 << host->data_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) u32 blksz_depth = blksz / fifo_width, fifoth_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) int idx = ARRAY_SIZE(mszs) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) /* PIO should skip this scenario */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) if (!host->use_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) tx_wmark = host->fifo_depth / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) tx_wmark_invers = host->fifo_depth - tx_wmark;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) * MSIZE stays at '1' (single transfer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) * if blksz is not a multiple of the FIFO width.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) if (blksz % fifo_width)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) if (!((blksz_depth % mszs[idx]) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) (tx_wmark_invers % mszs[idx]))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) msize = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) rx_wmark = mszs[idx] - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) } while (--idx > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) * If idx is '0', it won't be tried.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) * Thus, the initial values are used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) mci_writel(host, FIFOTH, fifoth_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) }
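
/*
 * Worked example for the selection above, assuming a 32-word FIFO
 * (fifo_depth = 32), a 4-byte FIFO width and blksz = 512:
 *
 *	blksz_depth     = 512 / 4  = 128
 *	tx_wmark        = 32 / 2   = 16
 *	tx_wmark_invers = 32 - 16  = 16
 *
 * Scanning mszs[] from idx = 7 down, idx = 3 (mszs[3] = 16) is the first
 * entry dividing both 128 and 16, so msize = 3, rx_wmark = 15 and
 * FIFOTH = SDMMC_SET_FIFOTH(3, 15, 16).
 */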
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) unsigned int blksz = data->blksz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) u32 blksz_depth, fifo_depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) u16 thld_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) u8 enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) * CDTHRCTL doesn't exist prior to 240A (in fact that register offset is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) * in the FIFO region, so we really shouldn't access it).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) if (host->verid < DW_MMC_240A ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) (host->verid < DW_MMC_280A && data->flags & MMC_DATA_WRITE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) * The card write threshold was introduced in 2.80a.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) * It is used only when HS400 mode is enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) if (data->flags & MMC_DATA_WRITE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) host->timing != MMC_TIMING_MMC_HS400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) goto disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) if (data->flags & MMC_DATA_WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) enable = SDMMC_CARD_WR_THR_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) enable = SDMMC_CARD_RD_THR_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) if (host->timing != MMC_TIMING_MMC_HS200 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) host->timing != MMC_TIMING_UHS_SDR104 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) host->timing != MMC_TIMING_MMC_HS400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) goto disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) blksz_depth = blksz / (1 << host->data_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) fifo_depth = host->fifo_depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) if (blksz_depth > fifo_depth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) goto disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) * If (blksz_depth) >= (fifo_depth >> 1), any 'thld_size <= blksz' works.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) * If (blksz_depth) < (fifo_depth >> 1), thld_size must equal blksz.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) * Currently we just choose blksz in both cases.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) thld_size = blksz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) mci_writel(host, CDTHRCTL, SDMMC_SET_THLD(thld_size, enable));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) disable:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) mci_writel(host, CDTHRCTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) }
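
/*
 * Example for the decision above, assuming HS200 timing on a read with
 * blksz = 512, a 4-byte FIFO width and fifo_depth = 128:
 *
 *	blksz_depth = 512 / 4 = 128  <=  fifo_depth
 *
 * so the threshold is enabled with
 * CDTHRCTL = SDMMC_SET_THLD(512, SDMMC_CARD_RD_THR_EN).
 */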
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) unsigned long irqflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) int sg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) u32 temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) host->using_dma = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) /* If we don't have a channel, we can't do DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) if (!host->use_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) sg_len = dw_mci_pre_dma_transfer(host, data, COOKIE_MAPPED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) if (sg_len < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) host->dma_ops->stop(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) return sg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) host->using_dma = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) if (host->use_dma == TRANS_MODE_IDMAC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) dev_vdbg(host->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) (unsigned long)host->sg_cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) (unsigned long)host->sg_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) sg_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) * Decide the MSIZE and RX/TX watermark.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) * If the current block size is the same as the previous one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) * there is no need to update FIFOTH.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) if (host->prev_blksz != data->blksz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) dw_mci_adjust_fifoth(host, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) /* Enable the DMA interface */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) temp = mci_readl(host, CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) temp |= SDMMC_CTRL_DMA_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) mci_writel(host, CTRL, temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) /* Disable RX/TX IRQs, let DMA handle it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) spin_lock_irqsave(&host->irq_lock, irqflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) temp = mci_readl(host, INTMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) mci_writel(host, INTMASK, temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) spin_unlock_irqrestore(&host->irq_lock, irqflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) if (host->dma_ops->start(host, sg_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) host->dma_ops->stop(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) /* We can't do DMA, try PIO for this one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) dev_dbg(host->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) "%s: fall back to PIO mode for current transfer\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) unsigned long irqflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) int flags = SG_MITER_ATOMIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) u32 temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) data->error = -EINPROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) WARN_ON(host->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) host->sg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) host->data = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) if (data->flags & MMC_DATA_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) host->dir_status = DW_MCI_RECV_STATUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) host->dir_status = DW_MCI_SEND_STATUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) dw_mci_ctrl_thld(host, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) if (dw_mci_submit_data_dma(host, data)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) if (host->data->flags & MMC_DATA_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) flags |= SG_MITER_TO_SG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) flags |= SG_MITER_FROM_SG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) host->sg = data->sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) host->part_buf_start = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) host->part_buf_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) spin_lock_irqsave(&host->irq_lock, irqflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) temp = mci_readl(host, INTMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) mci_writel(host, INTMASK, temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) spin_unlock_irqrestore(&host->irq_lock, irqflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) temp = mci_readl(host, CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) temp &= ~SDMMC_CTRL_DMA_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) mci_writel(host, CTRL, temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) * Use the initial fifoth_val for PIO mode. If wm_aligned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) * is set, we set the watermark to match the data size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) * Since the next transfer may be handled in DMA mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) * prev_blksz must be invalidated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) if (host->wm_aligned)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) dw_mci_adjust_fifoth(host, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) mci_writel(host, FIFOTH, host->fifoth_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) host->prev_blksz = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) * Keep the current block size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) * It will be used to decide whether to update
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) * fifoth register next time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) host->prev_blksz = data->blksz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) struct dw_mci *host = slot->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) unsigned int clock = slot->clock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) u32 div;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) u32 clk_en_a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) /* We must continue to set bit 28 in CMD until the change is complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) if (host->state == STATE_WAITING_CMD11_DONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) slot->mmc->actual_clock = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) if (!clock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) mci_writel(host, CLKENA, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) mci_send_cmd(slot, sdmmc_cmd_bits, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) } else if (clock != host->current_speed || force_clkinit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) div = host->bus_hz / clock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) if (host->bus_hz % clock && host->bus_hz > clock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) * move the + 1 after the divide to prevent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) * over-clocking the card.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) div += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) if ((clock != slot->__clk_old &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) !test_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) force_clkinit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) /* Silence the verbose log if called from a PM context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) if (!force_clkinit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) dev_info(&slot->mmc->class_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) slot->id, host->bus_hz, clock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) div ? ((host->bus_hz / div) >> 1) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) host->bus_hz, div);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) * If the card is polled, display the message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) * only once, at boot time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) if (slot->mmc->caps & MMC_CAP_NEEDS_POLL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) slot->mmc->f_min == clock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) set_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) /* disable clock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) mci_writel(host, CLKENA, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) mci_writel(host, CLKSRC, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) /* inform CIU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) mci_send_cmd(slot, sdmmc_cmd_bits, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) /* set clock to desired speed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) mci_writel(host, CLKDIV, div);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) /* inform CIU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) mci_send_cmd(slot, sdmmc_cmd_bits, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) /* enable clock; only low power if no SDIO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) mci_writel(host, CLKENA, clk_en_a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) /* inform CIU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) mci_send_cmd(slot, sdmmc_cmd_bits, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) /* keep the last clock value that was requested from core */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) slot->__clk_old = clock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) slot->mmc->actual_clock = div ? ((host->bus_hz / div) >> 1) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) host->bus_hz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) host->current_speed = clock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) /* Set the current slot bus width */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) mci_writel(host, CTYPE, (slot->ctype << slot->id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) }
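
/*
 * Worked example for the divider math above, assuming bus_hz = 200 MHz and
 * a requested clock of 52 MHz:
 *
 *	div = 200000000 / 52000000           = 3 (truncated)
 *	remainder != 0 and bus_hz > clock   -> div = 4
 *	div = DIV_ROUND_UP(4, 2)             = 2 (CLKDIV divides by 2 * n)
 *
 *	actual_clock = (200000000 / 2) >> 1  = 50 MHz
 *
 * i.e. the card gets the fastest clock that does not exceed the request.
 */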
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) static void __dw_mci_start_request(struct dw_mci *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) struct dw_mci_slot *slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) struct mmc_command *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) struct mmc_request *mrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) struct mmc_data *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) u32 cmdflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) mrq = slot->mrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) host->mrq = mrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) host->pending_events = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) host->completed_events = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) host->cmd_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) host->data_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) host->dir_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) if (host->is_rv1106_sd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) mci_writel(host, CTYPE, (slot->ctype << slot->id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) data = cmd->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) if (data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) mci_writel(host, TMOUT, 0xFFFFFFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) if (host->is_rv1106_sd && (data->flags & MMC_DATA_WRITE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) mci_writel(host, BYTCNT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) mci_writel(host, BYTCNT, data->blksz * data->blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) mci_writel(host, BLKSIZ, data->blksz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) /* this is the first command, send the initialization clock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) cmdflags |= SDMMC_CMD_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) if (data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) dw_mci_submit_data(host, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) wmb(); /* drain writebuffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) dw_mci_start_command(host, cmd, cmdflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) if (cmd->opcode == SD_SWITCH_VOLTAGE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) unsigned long irqflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) * Databook says to fail after 2ms w/ no response, but evidence
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) * shows that sometimes the cmd11 interrupt takes over 130ms.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) * We'll set to 500ms, plus an extra jiffy just in case jiffies
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) * is just about to roll over.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) * We do this whole thing under spinlock and only if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) * command hasn't already completed (indicating that the IRQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) * already ran, so we don't want the timeout).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) spin_lock_irqsave(&host->irq_lock, irqflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) mod_timer(&host->cmd11_timer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) jiffies + msecs_to_jiffies(500) + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) spin_unlock_irqrestore(&host->irq_lock, irqflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) static void dw_mci_start_request(struct dw_mci *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) struct dw_mci_slot *slot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) struct mmc_request *mrq = slot->mrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) struct mmc_command *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) __dw_mci_start_request(host, slot, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) }
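
/*
 * When the core supplies a SET_BLOCK_COUNT command (mrq->sbc, CMD23) it is
 * issued first; the data command itself is then sent from the state
 * machine once CMD23 completes.
 */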
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) /* must be called with host->lock held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) struct mmc_request *mrq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) host->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) slot->mrq = mrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) if (host->state == STATE_WAITING_CMD11_DONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) dev_warn(&slot->mmc->class_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) "Voltage change didn't complete\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) * This case isn't expected to happen, so we can either
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) * crash here or just try to continue on in the closest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) * possible state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) host->state = STATE_IDLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) if (host->state == STATE_IDLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) host->state = STATE_SENDING_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) dw_mci_start_request(host, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) list_add_tail(&slot->queue_node, &host->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) static bool dw_mci_reset(struct dw_mci *host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) struct dw_mci_slot *slot = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) struct dw_mci *host = slot->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) WARN_ON(slot->mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) * The check for card presence and queueing of the request must be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) * atomic, otherwise the card could be removed in between and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) * request wouldn't fail until another card was inserted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) if (!dw_mci_get_cd(mmc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) mrq->cmd->error = -ENOMEDIUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) mmc_request_done(mmc, mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) if (host->is_rv1106_sd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)
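/*
 * Wait (up to 500 ms, polling every 1 ms) for the status bit to assert
 * before issuing the next request; BIT(2) of SDMMC_STATUS is assumed
 * here to be the FIFO-empty flag per the DesignWare databook layout.
 */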
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) readl_poll_timeout(host->regs + SDMMC_STATUS, reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) reg & BIT(2), USEC_PER_MSEC, 500 * USEC_PER_MSEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) spin_lock_bh(&host->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) if (host->is_rv1106_sd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) dw_mci_reset(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) dw_mci_queue_request(host, slot, mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) spin_unlock_bh(&host->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) struct dw_mci_slot *slot = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) u32 regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) switch (ios->bus_width) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) case MMC_BUS_WIDTH_4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) slot->ctype = SDMMC_CTYPE_4BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) case MMC_BUS_WIDTH_8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) slot->ctype = SDMMC_CTYPE_8BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) /* set default 1 bit mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) slot->ctype = SDMMC_CTYPE_1BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) regs = mci_readl(slot->host, UHS_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) /* DDR mode set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) if (ios->timing == MMC_TIMING_MMC_DDR52 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) ios->timing == MMC_TIMING_UHS_DDR50 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) ios->timing == MMC_TIMING_MMC_HS400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) regs |= ((0x1 << slot->id) << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) regs &= ~((0x1 << slot->id) << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) mci_writel(slot->host, UHS_REG, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) slot->host->timing = ios->timing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) * Use mirror of ios->clock to prevent race with mmc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) * core ios update when finding the minimum.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) slot->clock = ios->clock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) if (drv_data && drv_data->set_ios)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) drv_data->set_ios(slot->host, ios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) switch (ios->power_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) case MMC_POWER_UP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) if (!IS_ERR(mmc->supply.vmmc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) ios->vdd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) dev_err(slot->host->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) "failed to enable vmmc regulator\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) /* Return if we failed to turn on vmmc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) regs = mci_readl(slot->host, PWREN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) regs |= (1 << slot->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) mci_writel(slot->host, PWREN, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) case MMC_POWER_ON:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) if (!slot->host->vqmmc_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) if (!IS_ERR(mmc->supply.vqmmc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) ret = regulator_enable(mmc->supply.vqmmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) dev_err(slot->host->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) "failed to enable vqmmc\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) slot->host->vqmmc_enabled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) /* Keep track so we don't reset again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) slot->host->vqmmc_enabled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) /* Reset our state machine after powering on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) dw_mci_ctrl_reset(slot->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) SDMMC_CTRL_ALL_RESET_FLAGS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) /* Adjust clock / bus width after power is up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) dw_mci_setup_bus(slot, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) case MMC_POWER_OFF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) /* Turn clock off before power goes down */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) dw_mci_setup_bus(slot, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) if (!IS_ERR(mmc->supply.vmmc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) regulator_disable(mmc->supply.vqmmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) slot->host->vqmmc_enabled = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) regs = mci_readl(slot->host, PWREN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) regs &= ~(1 << slot->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) mci_writel(slot->host, PWREN, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) slot->host->state = STATE_IDLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) static int dw_mci_card_busy(struct mmc_host *mmc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) struct dw_mci_slot *slot = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) u32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) * Check the busy bit, which is low when DAT[3:0]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) * (the data lines) are 0000.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) status = mci_readl(slot->host, STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) return !!(status & SDMMC_STATUS_BUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) }
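
/*
 * Example: during a CMD11 voltage switch the card holds its DAT lines low
 * until it is ready, so SDMMC_STATUS_BUSY reads 1 and the core keeps
 * waiting; the bit drops once the card releases the lines.
 */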
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) struct dw_mci_slot *slot = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) struct dw_mci *host = slot->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) const struct dw_mci_drv_data *drv_data = host->drv_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) u32 uhs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) u32 v18 = SDMMC_UHS_18V << slot->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) if (drv_data && drv_data->switch_voltage)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) return drv_data->switch_voltage(mmc, ios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) * Program the voltage. Note that some instances of dw_mmc may use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) * the UHS_REG for this. For other instances (like exynos) the UHS_REG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) * does no harm but you need to set the regulator directly. Try both.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) uhs = mci_readl(host, UHS_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) uhs &= ~v18;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) uhs |= v18;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) if (!IS_ERR(mmc->supply.vqmmc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) ret = mmc_regulator_set_vqmmc(mmc, ios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) dev_dbg(&mmc->class_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) "Regulator set error %d - %s V\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) ret, uhs & v18 ? "1.8" : "3.3");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) mci_writel(host, UHS_REG, uhs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) static int dw_mci_get_ro(struct mmc_host *mmc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) int read_only;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) struct dw_mci_slot *slot = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) int gpio_ro = mmc_gpio_get_ro(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628)
	/* Prefer the read-only GPIO; else fall back to the WRTPRT register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) if (gpio_ro >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) read_only = gpio_ro;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) read_only =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) dev_dbg(&mmc->class_dev, "card is %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) read_only ? "read-only" : "read-write");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) return read_only;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641)
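/*
 * Hardware reset of the card: reset the host's DMA/FIFO state, then
 * pulse the slot's RST_n line low and back high with the eMMC timings
 * noted below.
 */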
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) static void dw_mci_hw_reset(struct mmc_host *mmc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) struct dw_mci_slot *slot = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) struct dw_mci *host = slot->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) int reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) if (host->use_dma == TRANS_MODE_IDMAC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) dw_mci_idmac_reset(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) SDMMC_CTRL_FIFO_RESET))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) * According to eMMC spec, card reset procedure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) * tRstW >= 1us: RST_n pulse width
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) * tRSCA >= 200us: RST_n to Command time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) * tRSTH >= 1us: RST_n high period
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) reset = mci_readl(host, RST_N);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) reset &= ~(SDMMC_RST_HWACTIVE << slot->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) mci_writel(host, RST_N, reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) usleep_range(1, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) reset |= SDMMC_RST_HWACTIVE << slot->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) mci_writel(host, RST_N, reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) usleep_range(200, 300);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) struct dw_mci_slot *slot = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) struct dw_mci *host = slot->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) * Low power mode will stop the card clock when idle. According to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) * description of the CLKENA register we should disable low power mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) * for SDIO cards if we need SDIO interrupts to work.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) if (mmc->caps & MMC_CAP_SDIO_IRQ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) u32 clk_en_a_old;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) u32 clk_en_a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) clk_en_a_old = mci_readl(host, CLKENA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) if (card->type == MMC_TYPE_SDIO ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) card->type == MMC_TYPE_SD_COMBO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) clk_en_a = clk_en_a_old & ~clken_low_pwr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) clk_en_a = clk_en_a_old | clken_low_pwr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) if (clk_en_a != clk_en_a_old) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) mci_writel(host, CLKENA, clk_en_a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) SDMMC_CMD_PRV_DAT_WAIT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703)
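/* Mask or unmask the slot's SDIO interrupt in INTMASK, under irq_lock */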
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) static void __dw_mci_enable_sdio_irq(struct dw_mci_slot *slot, int enb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) struct dw_mci *host = slot->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) unsigned long irqflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) u32 int_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) spin_lock_irqsave(&host->irq_lock, irqflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711)
	/* Enable/disable the slot-specific SDIO interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) int_mask = mci_readl(host, INTMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) if (enb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) int_mask |= SDMMC_INT_SDIO(slot->sdio_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) int_mask &= ~SDMMC_INT_SDIO(slot->sdio_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) mci_writel(host, INTMASK, int_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) spin_unlock_irqrestore(&host->irq_lock, irqflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) struct dw_mci_slot *slot = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) struct dw_mci *host = slot->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) __dw_mci_enable_sdio_irq(slot, enb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) /* Avoid runtime suspending the device when SDIO IRQ is enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) if (enb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) pm_runtime_get_noresume(host->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) pm_runtime_put_noidle(host->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736)
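/*
 * Re-enable the SDIO interrupt once the core has handled it; the
 * interrupt handler is expected to have masked it in the meantime.
 */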
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) static void dw_mci_ack_sdio_irq(struct mmc_host *mmc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) struct dw_mci_slot *slot = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) __dw_mci_enable_sdio_irq(slot, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743)
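/* Tuning is delegated entirely to the platform glue (drv_data) */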
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) struct dw_mci_slot *slot = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) struct dw_mci *host = slot->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) const struct dw_mci_drv_data *drv_data = host->drv_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) int err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) if (drv_data && drv_data->execute_tuning)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) err = drv_data->execute_tuning(slot, opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) struct mmc_ios *ios)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) struct dw_mci_slot *slot = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) struct dw_mci *host = slot->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) const struct dw_mci_drv_data *drv_data = host->drv_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) if (drv_data && drv_data->prepare_hs400_tuning)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) return drv_data->prepare_hs400_tuning(host, ios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768)
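/*
 * Full controller reset: stop any scatter-gather walk, reset the CIU
 * (plus DMA/FIFO when DMA is in use), wait for an in-flight DMA request
 * to drain, reinitialize the IDMAC if needed, and finally have the CIU
 * latch the clock registers again.
 */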
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) static bool dw_mci_reset(struct dw_mci *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) bool ret = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) u32 status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774)
	/*
	 * Resetting can generate a block interrupt, so stop the
	 * scatter-gather miter and clear the pointer first.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) if (host->sg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) sg_miter_stop(&host->sg_miter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) host->sg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) if (host->use_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) flags |= SDMMC_CTRL_DMA_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) if (dw_mci_ctrl_reset(host, flags)) {
		/*
		 * In all cases, clear the RINTSTS register to ack any
		 * pending raw interrupts.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) mci_writel(host, RINTSTS, 0xFFFFFFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) if (!host->use_dma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) ret = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) goto ciu_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) /* Wait for dma_req to be cleared */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) !(status & SDMMC_STATUS_DMA_REQ),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 1, 500 * USEC_PER_MSEC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) dev_err(host->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) "%s: Timeout waiting for dma_req to be cleared\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) goto ciu_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809)
		/* When using DMA we also need to reset the FIFO again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) goto ciu_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) } else {
		/* If the controller reset bit did clear, the CIU was reset; go update clocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) dev_err(host->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) "%s: fifo/dma reset bits didn't clear but ciu was reset, doing clock update\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) goto ciu_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) if (host->use_dma == TRANS_MODE_IDMAC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) /* It is also required that we reinit idmac */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) dw_mci_idmac_init(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) ret = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) ciu_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) /* After a CTRL reset we need to have CIU set clock registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) mci_send_cmd(host->slot, SDMMC_CMD_UPD_CLK, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) static const struct mmc_host_ops dw_mci_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) .request = dw_mci_request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) .pre_req = dw_mci_pre_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) .post_req = dw_mci_post_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) .set_ios = dw_mci_set_ios,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) .get_ro = dw_mci_get_ro,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) .get_cd = dw_mci_get_cd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) .hw_reset = dw_mci_hw_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) .enable_sdio_irq = dw_mci_enable_sdio_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) .ack_sdio_irq = dw_mci_ack_sdio_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) .execute_tuning = dw_mci_execute_tuning,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) .card_busy = dw_mci_card_busy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) .start_signal_voltage_switch = dw_mci_switch_voltage,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) .init_card = dw_mci_init_card,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) .prepare_hs400_tuning = dw_mci_prepare_hs400_tuning,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852)
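/*
 * Finish the current request and start the next queued one, if any.
 * host->lock is dropped around mmc_request_done(), matching the
 * __releases/__acquires annotations.
 */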
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) __releases(&host->lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) __acquires(&host->lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) struct dw_mci_slot *slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) struct mmc_host *prev_mmc = host->slot->mmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) WARN_ON(host->cmd || host->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) host->slot->mrq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) host->mrq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) if (!list_empty(&host->queue)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) slot = list_entry(host->queue.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) struct dw_mci_slot, queue_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) list_del(&slot->queue_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) dev_vdbg(host->dev, "list not empty: %s is next\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) mmc_hostname(slot->mmc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) host->state = STATE_SENDING_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) dw_mci_start_request(host, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) dev_vdbg(host->dev, "list empty\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) if (host->state == STATE_SENDING_CMD11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) host->state = STATE_WAITING_CMD11_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) host->state = STATE_IDLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) spin_unlock(&host->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) mmc_request_done(prev_mmc, mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) spin_lock(&host->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886)
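/*
 * Copy the command response out of the RESP registers (a 136-bit
 * response arrives with its most significant word in RESP3) and
 * translate the raw interrupt status into an errno.
 */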
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) u32 status = host->cmd_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) host->cmd_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) /* Read the response from the card (up to 16 bytes) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) if (cmd->flags & MMC_RSP_PRESENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) if (cmd->flags & MMC_RSP_136) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) cmd->resp[3] = mci_readl(host, RESP0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) cmd->resp[2] = mci_readl(host, RESP1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) cmd->resp[1] = mci_readl(host, RESP2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) cmd->resp[0] = mci_readl(host, RESP3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) cmd->resp[0] = mci_readl(host, RESP0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) cmd->resp[1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) cmd->resp[2] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) cmd->resp[3] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) if (status & SDMMC_INT_RTO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) cmd->error = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) cmd->error = -EILSEQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) else if (status & SDMMC_INT_RESP_ERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) cmd->error = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) cmd->error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) return cmd->error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919)
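/*
 * Translate the data-phase interrupt status into an errno. On the
 * RV1106 SD path, a write that saw DATA_OVER is treated as complete
 * regardless of the error bits.
 */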
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) u32 status = host->data_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923)
	if (host->is_rv1106_sd && (data->flags & MMC_DATA_WRITE) &&
	    (status & SDMMC_INT_DATA_OVER))
		goto finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) if (status & DW_MCI_DATA_ERROR_FLAGS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) if (status & SDMMC_INT_DRTO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) data->error = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) } else if (status & SDMMC_INT_DCRC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) data->error = -EILSEQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) } else if (status & SDMMC_INT_EBE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) if (host->dir_status ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) DW_MCI_SEND_STATUS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) * No data CRC status was returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) * The number of bytes transferred
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) * will be exaggerated in PIO mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) data->bytes_xfered = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) data->error = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) } else if (host->dir_status ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) DW_MCI_RECV_STATUS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) data->error = -EILSEQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) } else {
			/* Remaining cases, including SDMMC_INT_SBE (start bit error) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) data->error = -EILSEQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) dev_dbg(host->dev, "data error, status 0x%08x\n", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) * After an error, there may be data lingering
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) * in the FIFO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) dw_mci_reset(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) finish:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) data->bytes_xfered = data->blocks * data->blksz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) data->error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) return data->error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966)
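/*
 * Arm a software backup for the hardware data read timeout: TMOUT[31:8]
 * holds the timeout in card clocks and CLKDIV holds half the divider,
 * so the timeout is roughly drto_clks * (CLKDIV * 2) * 1000 / bus_hz
 * milliseconds, plus a little slack.
 */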
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) static void dw_mci_set_drto(struct dw_mci *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) unsigned int drto_clks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) unsigned int drto_div;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) unsigned int drto_ms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) unsigned long irqflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) drto_clks = mci_readl(host, TMOUT) >> 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) drto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) if (drto_div == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) drto_div = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) drto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * drto_clks * drto_div,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) host->bus_hz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981)
	/* add a bit of spare time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) drto_ms += 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) spin_lock_irqsave(&host->irq_lock, irqflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) mod_timer(&host->dto_timer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) jiffies + msecs_to_jiffies(drto_ms));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) spin_unlock_irqrestore(&host->irq_lock, irqflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) static void dw_mci_set_xfer_timeout(struct dw_mci *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) unsigned int xfer_clks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) unsigned int xfer_div;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) unsigned int xfer_ms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) unsigned long irqflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) xfer_clks = mci_readl(host, TMOUT) >> 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) xfer_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) if (xfer_div == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) xfer_div = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) xfer_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * xfer_clks * xfer_div,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) host->bus_hz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005)
	/* add a bit of spare time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) xfer_ms += 100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) spin_lock_irqsave(&host->irq_lock, irqflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) if (!test_bit(EVENT_XFER_COMPLETE, &host->pending_events))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) mod_timer(&host->xfer_timer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) jiffies + msecs_to_jiffies(xfer_ms));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) spin_unlock_irqrestore(&host->irq_lock, irqflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015)
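/*
 * Atomically consume a pending command-complete event, making sure the
 * command timeout timer is stopped before acting on it.
 */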
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) static bool dw_mci_clear_pending_cmd_complete(struct dw_mci *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) * Really be certain that the timer has stopped. This is a bit of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) * paranoia and could only really happen if we had really bad
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) * interrupt latency and the interrupt routine and timeout were
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) * running concurrently so that the del_timer() in the interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) * handler couldn't run.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) WARN_ON(del_timer_sync(&host->cto_timer));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) clear_bit(EVENT_CMD_COMPLETE, &host->pending_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) static bool dw_mci_clear_pending_data_complete(struct dw_mci *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) /* Extra paranoia just like dw_mci_clear_pending_cmd_complete() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) WARN_ON(del_timer_sync(&host->dto_timer));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) clear_bit(EVENT_DATA_COMPLETE, &host->pending_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045)
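/*
 * Request state machine, run in tasklet context under host->lock:
 * IDLE -> SENDING_CMD[11] -> SENDING_DATA -> DATA_BUSY -> SENDING_STOP
 * -> back to IDLE, with STATE_DATA_ERROR as a detour on data errors.
 * The loop keeps advancing until the state stops changing, so several
 * completions can be retired in a single pass.
 */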
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) static void dw_mci_tasklet_func(unsigned long priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) struct dw_mci *host = (struct dw_mci *)priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) struct mmc_data *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) struct mmc_command *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) struct mmc_request *mrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) enum dw_mci_state state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) enum dw_mci_state prev_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) unsigned int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) spin_lock(&host->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) state = host->state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) data = host->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) mrq = host->mrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) prev_state = state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) switch (state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) case STATE_IDLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) case STATE_WAITING_CMD11_DONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) case STATE_SENDING_CMD11:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) case STATE_SENDING_CMD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) if (!dw_mci_clear_pending_cmd_complete(host))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) cmd = host->cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) host->cmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) err = dw_mci_command_complete(host, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) if (cmd == mrq->sbc && !err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) __dw_mci_start_request(host, host->slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) mrq->cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) if (cmd->data && err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) * During UHS tuning sequence, sending the stop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) * command after the response CRC error would
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) * throw the system into a confused state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) * causing all future tuning phases to report
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) * failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) * In such case controller will move into a data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) * transfer state after a response error or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) * response CRC error. Let's let that finish
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) * before trying to send a stop, so we'll go to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) * STATE_SENDING_DATA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) * Although letting the data transfer take place
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) * will waste a bit of time (we already know
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) * the command was bad), it can't cause any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) * errors since it's possible it would have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) * taken place anyway if this tasklet got
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) * delayed. Allowing the transfer to take place
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) * avoids races and keeps things simple.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) if (err != -ETIMEDOUT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) host->dir_status == DW_MCI_RECV_STATUS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) state = STATE_SENDING_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) send_stop_abort(host, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) dw_mci_stop_dma(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) state = STATE_SENDING_STOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) if (!cmd->data || err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) dw_mci_request_end(host, mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) prev_state = state = STATE_SENDING_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) case STATE_SENDING_DATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) * We could get a data error and never a transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) * complete so we'd better check for it here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) * Note that we don't really care if we also got a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) * transfer complete; stopping the DMA and sending an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) * abort won't hurt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) if (test_and_clear_bit(EVENT_DATA_ERROR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) &host->pending_events)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) if (!(host->data_status & (SDMMC_INT_DRTO |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) SDMMC_INT_EBE)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) send_stop_abort(host, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) dw_mci_stop_dma(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) state = STATE_DATA_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) &host->pending_events)) {
				/*
				 * If the data-related interrupts don't all
				 * arrive within the given time while in the
				 * reading data state, arm the data timeout.
				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) if (host->dir_status == DW_MCI_RECV_STATUS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) dw_mci_set_drto(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) if (host->need_xfer_timer &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) host->dir_status == DW_MCI_RECV_STATUS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) dw_mci_set_xfer_timeout(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) * Handle an EVENT_DATA_ERROR that might have shown up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) * before the transfer completed. This might not have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) * been caught by the check above because the interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) * could have gone off between the previous check and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) * the check for transfer complete.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) * Technically this ought not be needed assuming we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) * get a DATA_COMPLETE eventually (we'll notice the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) * error and end the request), but it shouldn't hurt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) * This has the advantage of sending the stop command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) if (test_and_clear_bit(EVENT_DATA_ERROR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) &host->pending_events)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) if (!(host->data_status & (SDMMC_INT_DRTO |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) SDMMC_INT_EBE)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) send_stop_abort(host, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) dw_mci_stop_dma(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) state = STATE_DATA_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) prev_state = state = STATE_DATA_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) case STATE_DATA_BUSY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) if (!dw_mci_clear_pending_data_complete(host)) {
				/*
				 * If the data error interrupt has come but the
				 * data over interrupt doesn't arrive within the
				 * given time in the reading data state, arm the
				 * data timeout.
				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) if (host->dir_status == DW_MCI_RECV_STATUS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) dw_mci_set_drto(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) host->data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) err = dw_mci_data_complete(host, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) if (!data->stop || mrq->sbc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) if (mrq->sbc && data->stop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) data->stop->error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) dw_mci_request_end(host, mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211)
			/* stop command for open-ended transfer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) if (data->stop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) send_stop_abort(host, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) * If we don't have a command complete now we'll
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) * never get one since we just reset everything;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) * better end the request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) * If we do have a command complete we'll fall
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) * through to the SENDING_STOP command and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) * everything will be peachy keen.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) if (!test_bit(EVENT_CMD_COMPLETE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) &host->pending_events)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) host->cmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) dw_mci_request_end(host, mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232)
			/*
			 * If err is non-zero, the stop-abort command has
			 * already been issued.
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) prev_state = state = STATE_SENDING_STOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) case STATE_SENDING_STOP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) if (!dw_mci_clear_pending_cmd_complete(host))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) /* CMD error in data command */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) if (mrq->cmd->error && mrq->data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) dw_mci_reset(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) host->cmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) host->data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) if (!mrq->sbc && mrq->stop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) dw_mci_command_complete(host, mrq->stop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) host->cmd_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) dw_mci_request_end(host, mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) case STATE_DATA_ERROR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) &host->pending_events))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) state = STATE_DATA_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) } while (state != prev_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) host->state = state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) spin_unlock(&host->lock);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275)
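/*
 * PIO helpers. FIFO accesses must be whole items of the host data width
 * (1 << host->data_shift bytes), so part_buf carries the odd leftover
 * bytes of a push or pull between calls.
 */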
/* store final bytes in part_buf; only used during a push */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) memcpy((void *)&host->part_buf, buf, cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) host->part_buf_count = cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) /* append bytes to part_buf, only use during push */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) host->part_buf_count += cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) return cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) /* pull first bytes from part_buf, only use during pull */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) cnt = min_t(int, cnt, host->part_buf_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) if (cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) host->part_buf_count -= cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) host->part_buf_start += cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) return cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) /* pull final bytes from the part_buf, assuming it's just been filled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) memcpy(buf, &host->part_buf, cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) host->part_buf_start = cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) host->part_buf_count = (1 << host->data_shift) - cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) struct mmc_data *data = host->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) int init_cnt = cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) /* try and push anything in the part_buf */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) if (unlikely(host->part_buf_count)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) int len = dw_mci_push_part_bytes(host, buf, cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) buf += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) cnt -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) if (host->part_buf_count == 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) mci_fifo_writew(host->fifo_reg, host->part_buf16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) host->part_buf_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) if (unlikely((unsigned long)buf & 0x1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) while (cnt >= 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) u16 aligned_buf[64];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) int len = min(cnt & -2, (int)sizeof(aligned_buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) int items = len >> 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) /* memcpy from input buffer into aligned buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) memcpy(aligned_buf, buf, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) buf += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) cnt -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) /* push data from aligned buffer into fifo */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) for (i = 0; i < items; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) mci_fifo_writew(host->fifo_reg, aligned_buf[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) u16 *pdata = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) for (; cnt >= 2; cnt -= 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) mci_fifo_writew(host->fifo_reg, *pdata++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) buf = pdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) /* put anything remaining in the part_buf */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) if (cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) dw_mci_set_part_bytes(host, buf, cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) /* Push data if we have reached the expected data length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) if ((data->bytes_xfered + init_cnt) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) (data->blksz * data->blocks))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) mci_fifo_writew(host->fifo_reg, host->part_buf16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) if (unlikely((unsigned long)buf & 0x1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) while (cnt >= 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) /* pull data from fifo into aligned buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) u16 aligned_buf[64];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) int len = min(cnt & -2, (int)sizeof(aligned_buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) int items = len >> 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) for (i = 0; i < items; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) aligned_buf[i] = mci_fifo_readw(host->fifo_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) /* memcpy from aligned buffer into output buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) memcpy(buf, aligned_buf, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) buf += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) cnt -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) u16 *pdata = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) for (; cnt >= 2; cnt -= 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) *pdata++ = mci_fifo_readw(host->fifo_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) buf = pdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) if (cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) host->part_buf16 = mci_fifo_readw(host->fifo_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) dw_mci_pull_final_bytes(host, buf, cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) struct mmc_data *data = host->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) int init_cnt = cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) /* try and push anything in the part_buf */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) if (unlikely(host->part_buf_count)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) int len = dw_mci_push_part_bytes(host, buf, cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) buf += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) cnt -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) if (host->part_buf_count == 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) mci_fifo_writel(host->fifo_reg, host->part_buf32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) host->part_buf_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) if (unlikely((unsigned long)buf & 0x3)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) while (cnt >= 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) u32 aligned_buf[32];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) int len = min(cnt & -4, (int)sizeof(aligned_buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) int items = len >> 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) /* memcpy from input buffer into aligned buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) memcpy(aligned_buf, buf, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) buf += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) cnt -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) /* push data from aligned buffer into fifo */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) for (i = 0; i < items; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) mci_fifo_writel(host->fifo_reg, aligned_buf[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) u32 *pdata = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) for (; cnt >= 4; cnt -= 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) mci_fifo_writel(host->fifo_reg, *pdata++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) buf = pdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) /* put anything remaining in the part_buf */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) if (cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) dw_mci_set_part_bytes(host, buf, cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) /* Push data if we have reached the expected data length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) if ((data->bytes_xfered + init_cnt) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) (data->blksz * data->blocks))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) mci_fifo_writel(host->fifo_reg, host->part_buf32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) if (unlikely((unsigned long)buf & 0x3)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) while (cnt >= 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) /* pull data from fifo into aligned buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) u32 aligned_buf[32];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) int len = min(cnt & -4, (int)sizeof(aligned_buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) int items = len >> 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) for (i = 0; i < items; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) aligned_buf[i] = mci_fifo_readl(host->fifo_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) /* memcpy from aligned buffer into output buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) memcpy(buf, aligned_buf, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) buf += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) cnt -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) u32 *pdata = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) for (; cnt >= 4; cnt -= 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) *pdata++ = mci_fifo_readl(host->fifo_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) buf = pdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) if (cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) host->part_buf32 = mci_fifo_readl(host->fifo_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) dw_mci_pull_final_bytes(host, buf, cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) struct mmc_data *data = host->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) int init_cnt = cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483)
	/* try to push anything left in the part_buf */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) if (unlikely(host->part_buf_count)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) int len = dw_mci_push_part_bytes(host, buf, cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) buf += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) cnt -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) if (host->part_buf_count == 8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) mci_fifo_writeq(host->fifo_reg, host->part_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) host->part_buf_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) if (unlikely((unsigned long)buf & 0x7)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) while (cnt >= 8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) u64 aligned_buf[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) int len = min(cnt & -8, (int)sizeof(aligned_buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) int items = len >> 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) /* memcpy from input buffer into aligned buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) memcpy(aligned_buf, buf, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) buf += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) cnt -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) /* push data from aligned buffer into fifo */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) for (i = 0; i < items; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) mci_fifo_writeq(host->fifo_reg, aligned_buf[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) u64 *pdata = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) for (; cnt >= 8; cnt -= 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) mci_fifo_writeq(host->fifo_reg, *pdata++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) buf = pdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) /* put anything remaining in the part_buf */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) if (cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) dw_mci_set_part_bytes(host, buf, cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) /* Push data if we have reached the expected data length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) if ((data->bytes_xfered + init_cnt) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) (data->blksz * data->blocks))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) mci_fifo_writeq(host->fifo_reg, host->part_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) if (unlikely((unsigned long)buf & 0x7)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) while (cnt >= 8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) /* pull data from fifo into aligned buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) u64 aligned_buf[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) int len = min(cnt & -8, (int)sizeof(aligned_buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) int items = len >> 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) for (i = 0; i < items; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) aligned_buf[i] = mci_fifo_readq(host->fifo_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) /* memcpy from aligned buffer into output buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) memcpy(buf, aligned_buf, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) buf += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) cnt -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) u64 *pdata = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) for (; cnt >= 8; cnt -= 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) *pdata++ = mci_fifo_readq(host->fifo_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) buf = pdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) if (cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) host->part_buf = mci_fifo_readq(host->fifo_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) dw_mci_pull_final_bytes(host, buf, cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563)
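/*
 * Common read-path entry point: first drain whatever is left in
 * part_buf from a previously read FIFO word, then hand the remainder
 * of the request to the width-specific routine behind host->pull_data.
 */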
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) /* get remaining partial bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) len = dw_mci_pull_part_bytes(host, buf, cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) if (unlikely(len == cnt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) buf += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) cnt -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) /* get the rest of the data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) host->pull_data(host, buf, cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578)
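/*
 * PIO read loop. The number of bytes available is derived from the
 * FIFO fill level: fcnt = (FCNT << data_shift) + part_buf_count, where
 * data_shift is log2 of the FIFO word size in bytes. Illustrative
 * numbers only: with a 32-bit FIFO (shift == 2), FCNT == 8 and one
 * leftover byte in part_buf, fcnt = (8 << 2) + 1 = 33 bytes.
 */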
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) struct sg_mapping_iter *sg_miter = &host->sg_miter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) void *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) unsigned int offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) struct mmc_data *data = host->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) int shift = host->data_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) u32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) unsigned int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) unsigned int remain, fcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) if (!sg_miter_next(sg_miter))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) host->sg = sg_miter->piter.sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) buf = sg_miter->addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) remain = sg_miter->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) << shift) + host->part_buf_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) len = min(remain, fcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) if (!len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) dw_mci_pull_data(host, (void *)(buf + offset), len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) data->bytes_xfered += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) offset += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) remain -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) } while (remain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) sg_miter->consumed = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) status = mci_readl(host, MINTSTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
		/* if the RXDR is ready, read again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) } while ((status & SDMMC_INT_RXDR) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) if (!remain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) if (!sg_miter_next(sg_miter))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) sg_miter->consumed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) sg_miter_stop(sg_miter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) sg_miter_stop(sg_miter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) host->sg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) smp_wmb(); /* drain writebuffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) if (host->need_xfer_timer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) del_timer(&host->xfer_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634)
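/*
 * PIO write loop, the mirror of the read path: fcnt here is the free
 * space in the FIFO, ((fifo_depth - FCNT) << data_shift) minus the
 * bytes already parked in part_buf, so the FIFO is never overfilled.
 */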
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) static void dw_mci_write_data_pio(struct dw_mci *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) struct sg_mapping_iter *sg_miter = &host->sg_miter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) void *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) unsigned int offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) struct mmc_data *data = host->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) int shift = host->data_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) u32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) unsigned int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) unsigned int fifo_depth = host->fifo_depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) unsigned int remain, fcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) if (!sg_miter_next(sg_miter))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) host->sg = sg_miter->piter.sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) buf = sg_miter->addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) remain = sg_miter->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) fcnt = ((fifo_depth -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) SDMMC_GET_FCNT(mci_readl(host, STATUS)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) << shift) - host->part_buf_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) len = min(remain, fcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) if (!len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) host->push_data(host, (void *)(buf + offset), len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) data->bytes_xfered += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) offset += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) remain -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) } while (remain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) sg_miter->consumed = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) status = mci_readl(host, MINTSTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
	} while (status & SDMMC_INT_TXDR); /* if TXDR is set, write again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) if (!remain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) if (!sg_miter_next(sg_miter))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) sg_miter->consumed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) sg_miter_stop(sg_miter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) sg_miter_stop(sg_miter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) host->sg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) smp_wmb(); /* drain writebuffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688)
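/*
 * Record command-phase completion. Both call sites in the interrupt
 * handler below invoke this with host->irq_lock held, which serializes
 * the cto_timer deletion and cmd_status update against the timer
 * callbacks that take the same lock.
 */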
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) del_timer(&host->cto_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) if (!host->cmd_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) host->cmd_status = status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) smp_wmb(); /* drain writebuffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) tasklet_schedule(&host->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) static void dw_mci_handle_cd(struct dw_mci *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) struct dw_mci_slot *slot = host->slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) if (slot->mmc->ops->card_event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) slot->mmc->ops->card_event(slot->mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) mmc_detect_change(slot->mmc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) msecs_to_jiffies(host->pdata->detect_delay_ms));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711)
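/*
 * Top-level interrupt handler. Events are dispatched in a deliberate
 * order: the CMD11 voltage-switch interrupt is checked first because it
 * can look like a command error, then command errors, data errors,
 * DATA_OVER, the RXDR/TXDR PIO drains, CMD_DONE, card detect and SDIO.
 * IDMAC status (IDSTS/IDSTS64) is handled last, and only when the
 * internal DMA controller is in use.
 */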
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) struct dw_mci *host = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) u32 pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) struct dw_mci_slot *slot = host->slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) unsigned long irqflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) pending = mci_readl(host, MINTSTS); /* read-only mask reg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) if (pending) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) /* Check volt switch first, since it can look like an error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) if ((host->state == STATE_SENDING_CMD11) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) (pending & SDMMC_INT_VOLT_SWITCH)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) pending &= ~SDMMC_INT_VOLT_SWITCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) * Hold the lock; we know cmd11_timer can't be kicked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) * off after the lock is released, so safe to delete.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) spin_lock_irqsave(&host->irq_lock, irqflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) dw_mci_cmd_interrupt(host, pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) spin_unlock_irqrestore(&host->irq_lock, irqflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) del_timer(&host->cmd11_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) if (pending & DW_MCI_CMD_ERROR_FLAGS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) spin_lock_irqsave(&host->irq_lock, irqflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) del_timer(&host->cto_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) host->cmd_status = pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) if ((host->need_xfer_timer) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) host->dir_status == DW_MCI_RECV_STATUS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) del_timer(&host->xfer_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) smp_wmb(); /* drain writebuffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) spin_unlock_irqrestore(&host->irq_lock, irqflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) if (pending & DW_MCI_DATA_ERROR_FLAGS) {
			/* if there is an error, report DATA_ERROR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) host->data_status = pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) smp_wmb(); /* drain writebuffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) set_bit(EVENT_DATA_ERROR, &host->pending_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) tasklet_schedule(&host->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) if (pending & SDMMC_INT_DATA_OVER) {
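			/*
			 * The rv1106_sd label below lets the RV1106-specific
			 * IDMAC handling at the end of this function jump
			 * back here and finish the request as a normal
			 * DATA_OVER completion.
			 */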
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) rv1106_sd:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) spin_lock_irqsave(&host->irq_lock, irqflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) del_timer(&host->dto_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) if (host->is_rv1106_sd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) pending |= SDMMC_INT_DATA_OVER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) if (!host->data_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) host->data_status = pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) smp_wmb(); /* drain writebuffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) if (host->dir_status == DW_MCI_RECV_STATUS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) if (host->sg != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) dw_mci_read_data_pio(host, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) tasklet_schedule(&host->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) spin_unlock_irqrestore(&host->irq_lock, irqflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) if (pending & SDMMC_INT_RXDR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) dw_mci_read_data_pio(host, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) if (pending & SDMMC_INT_TXDR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) dw_mci_write_data_pio(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) if (pending & SDMMC_INT_CMD_DONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) spin_lock_irqsave(&host->irq_lock, irqflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) dw_mci_cmd_interrupt(host, pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) spin_unlock_irqrestore(&host->irq_lock, irqflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) if (pending & SDMMC_INT_CD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) mci_writel(host, RINTSTS, SDMMC_INT_CD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) dw_mci_handle_cd(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) if (pending & SDMMC_INT_SDIO(slot->sdio_id)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) mci_writel(host, RINTSTS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) SDMMC_INT_SDIO(slot->sdio_id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) __dw_mci_enable_sdio_irq(slot, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) sdio_signal_irq(slot->mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) if (host->use_dma != TRANS_MODE_IDMAC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) /* Handle IDMA interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) if (host->dma_64bit_address == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) pending = mci_readl(host, IDSTS64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) SDMMC_IDMAC_INT_RI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) host->dma_ops->complete((void *)host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) pending = mci_readl(host, IDSTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) SDMMC_IDMAC_INT_RI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) host->dma_ops->complete((void *)host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) if (host->is_rv1106_sd && (pending & SDMMC_IDMAC_INT_TI))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) goto rv1106_sd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849)
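/*
 * Assemble host capabilities in order: caps and pm_caps from platform
 * data, then per-controller caps from drv_data (indexed by the "mshc"
 * alias or the platform-device id), then caps2 from platform data. The
 * ctrl_id bounds check matters because drv_data->caps is a fixed-size
 * per-controller array.
 */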
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) static int dw_mci_init_slot_caps(struct dw_mci_slot *slot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) struct dw_mci *host = slot->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) const struct dw_mci_drv_data *drv_data = host->drv_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) struct mmc_host *mmc = slot->mmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) int ctrl_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) if (host->pdata->caps)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) mmc->caps = host->pdata->caps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) if (host->pdata->pm_caps)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) mmc->pm_caps = host->pdata->pm_caps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) if (host->dev->of_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) if (ctrl_id < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) ctrl_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) ctrl_id = to_platform_device(host->dev)->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) if (drv_data && drv_data->caps) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) if (ctrl_id >= drv_data->num_caps) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) dev_err(host->dev, "invalid controller id %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) ctrl_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) mmc->caps |= drv_data->caps[ctrl_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) if (host->pdata->caps2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) mmc->caps2 = host->pdata->caps2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) mmc->f_min = DW_MCI_FREQ_MIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) if (!mmc->f_max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) mmc->f_max = DW_MCI_FREQ_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) /* Process SDIO IRQs through the sdio_irq_work. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) if (mmc->caps & MMC_CAP_SDIO_IRQ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893)
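/*
 * Transfer-geometry defaults by transfer mode. For IDMAC the request
 * size is bounded by the descriptor ring: max_req_size is 4 KiB per
 * descriptor (max_seg_size) times ring_size. As a purely illustrative
 * example, a hypothetical ring of 128 descriptors would allow 512 KiB
 * per request and max_blk_count = 512 KiB / 512 = 1024.
 */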
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) static int dw_mci_init_slot(struct dw_mci *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) struct mmc_host *mmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) struct dw_mci_slot *slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) if (!mmc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) slot = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) slot->id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) slot->sdio_id = host->sdio_id0 + slot->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) slot->mmc = mmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) slot->host = host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) host->slot = slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) mmc->ops = &dw_mci_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912)
	/* if there are external regulators, get them */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) ret = mmc_regulator_get_supply(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) goto err_host_allocated;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) if (!mmc->ocr_avail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) ret = mmc_of_parse(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) goto err_host_allocated;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) ret = dw_mci_init_slot_caps(slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) goto err_host_allocated;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) /* Useful defaults if platform data is unset. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) if (host->use_dma == TRANS_MODE_IDMAC) {
		/* Reserve the last descriptor for dirty data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) if (host->is_rv1106_sd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) host->ring_size--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) mmc->max_segs = host->ring_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) mmc->max_blk_size = 65535;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) mmc->max_seg_size = 0x1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) mmc->max_req_size = mmc->max_seg_size * host->ring_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) mmc->max_blk_count = mmc->max_req_size / 512;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) } else if (host->use_dma == TRANS_MODE_EDMAC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) mmc->max_segs = 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) mmc->max_blk_size = 65535;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) mmc->max_blk_count = 65535;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) mmc->max_req_size =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) mmc->max_blk_size * mmc->max_blk_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) mmc->max_seg_size = mmc->max_req_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) /* TRANS_MODE_PIO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) mmc->max_segs = 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) mmc->max_blk_size = 65535; /* BLKSIZ is 16 bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) mmc->max_blk_count = 512;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) mmc->max_req_size = mmc->max_blk_size *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) mmc->max_blk_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) mmc->max_seg_size = mmc->max_req_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) dw_mci_get_cd(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) ret = mmc_add_host(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) goto err_host_allocated;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) #if defined(CONFIG_DEBUG_FS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) dw_mci_init_debugfs(slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) err_host_allocated:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) mmc_free_host(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) static void dw_mci_cleanup_slot(struct dw_mci_slot *slot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) /* Debugfs stuff is cleaned up by mmc core */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) mmc_remove_host(slot->mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) slot->host->slot = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) mmc_free_host(slot->mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) static void dw_mci_init_dma(struct dw_mci *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) int addr_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) struct device *dev = host->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) /*
	 * Check transfer mode from HCON[17:16]
	 * Clarify the ambiguous description in the dw_mmc databook:
	 * 2b'00: No DMA Interface -> actually means using the internal DMA block
	 * 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block
	 * 2b'10: Generic DMA Interface -> non-Synopsys generic DMA block
	 * 2b'11: Non DW DMA Interface -> PIO only
	 * Compared to the DesignWare DMA Interface, the Generic DMA Interface
	 * has a simpler request/acknowledge handshake, and both are treated
	 * as external DMA masters by dw_mmc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) host->use_dma = SDMMC_GET_TRANS_MODE(mci_readl(host, HCON));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) if (host->use_dma == DMA_INTERFACE_IDMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) host->use_dma = TRANS_MODE_IDMAC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) } else if (host->use_dma == DMA_INTERFACE_DWDMA ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) host->use_dma == DMA_INTERFACE_GDMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) host->use_dma = TRANS_MODE_EDMAC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) goto no_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) /* Determine which DMA interface to use */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) if (host->use_dma == TRANS_MODE_IDMAC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) * Check ADDR_CONFIG bit in HCON to find
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) * IDMAC address bus width
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) addr_config = SDMMC_GET_ADDR_CONFIG(mci_readl(host, HCON));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) if (addr_config == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) /* host supports IDMAC in 64-bit address mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) host->dma_64bit_address = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) dev_info(host->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) "IDMAC supports 64-bit address mode.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) dma_set_coherent_mask(host->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) DMA_BIT_MASK(64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) /* host supports IDMAC in 32-bit address mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) host->dma_64bit_address = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) dev_info(host->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) "IDMAC supports 32-bit address mode.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) /* Alloc memory for sg translation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) host->sg_cpu = dmam_alloc_coherent(host->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) DESC_RING_BUF_SZ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) &host->sg_dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) if (!host->sg_cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) dev_err(host->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) "%s: could not alloc DMA memory\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) goto no_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) host->dma_ops = &dw_mci_idmac_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) dev_info(host->dev, "Using internal DMA controller.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) /* TRANS_MODE_EDMAC: check dma bindings again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) if ((device_property_read_string_array(dev, "dma-names",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) NULL, 0) < 0) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) !device_property_present(dev, "dmas")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) goto no_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) host->dma_ops = &dw_mci_edmac_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) dev_info(host->dev, "Using external DMA controller.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) if (host->dma_ops->init && host->dma_ops->start &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) host->dma_ops->stop && host->dma_ops->cleanup) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) if (host->dma_ops->init(host)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) goto no_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) dev_err(host->dev, "DMA initialization not found.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) goto no_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) no_dma:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) dev_info(host->dev, "Using PIO mode.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) host->use_dma = TRANS_MODE_PIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073)
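/*
 * Watchdog timers: cmd11, cto, xfer and dto fire only when an expected
 * interrupt fails to arrive. Because a timer can race with a late
 * interrupt, the cto/xfer/dto handlers recheck MINTSTS and/or the
 * pending_events bits under irq_lock before declaring a timeout; the
 * cmd11 handler only sanity-checks host->state.
 */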
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) static void dw_mci_cmd11_timer(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) struct dw_mci *host = from_timer(host, t, cmd11_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) if (host->state != STATE_SENDING_CMD11) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) dev_warn(host->dev, "Unexpected CMD11 timeout\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) host->cmd_status = SDMMC_INT_RTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) tasklet_schedule(&host->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) static void dw_mci_cto_timer(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) struct dw_mci *host = from_timer(host, t, cto_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) unsigned long irqflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) u32 pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) spin_lock_irqsave(&host->irq_lock, irqflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) /*
	 * If somehow we have very bad interrupt latency it's remotely possible
	 * that the timer could fire while the interrupt is still pending or
	 * while the interrupt is midway through running. Let's be paranoid
	 * and detect those two cases. Note that this paranoia is somewhat
	 * justified because in this function we don't actually cancel the
	 * pending command in the controller; we just assume it will never come.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) pending = mci_readl(host, MINTSTS); /* read-only mask reg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) if (pending & (DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_CMD_DONE)) {
		/* The interrupt should fire; no need to act, but we can warn */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) dev_warn(host->dev, "Unexpected interrupt latency\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) if (test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) {
		/* Presumably the interrupt handler couldn't delete the timer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) dev_warn(host->dev, "CTO timeout when already completed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) * Continued paranoia to make sure we're in the state we expect.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) * This paranoia isn't really justified but it seems good to be safe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) switch (host->state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) case STATE_SENDING_CMD11:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) case STATE_SENDING_CMD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) case STATE_SENDING_STOP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) /*
		 * If the CMD_DONE interrupt does NOT arrive while we are in a
		 * command-sending state, notify the driver to terminate the
		 * current transfer and report a command timeout to the core.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) host->cmd_status = SDMMC_INT_RTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) tasklet_schedule(&host->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) dev_warn(host->dev, "Unexpected command timeout, state %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) host->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) spin_unlock_irqrestore(&host->irq_lock, irqflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) static void dw_mci_xfer_timer(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) struct dw_mci *host = from_timer(host, t, xfer_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) unsigned long irqflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) spin_lock_irqsave(&host->irq_lock, irqflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) if (test_bit(EVENT_XFER_COMPLETE, &host->pending_events)) {
		/* Presumably the interrupt handler couldn't delete the timer */
		dev_warn(host->dev, "xfer timeout when already completed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) switch (host->state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) case STATE_SENDING_DATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) host->data_status = SDMMC_INT_DRTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) set_bit(EVENT_DATA_ERROR, &host->pending_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) tasklet_schedule(&host->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) dev_warn(host->dev, "Unexpected xfer timeout, state %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) host->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) spin_unlock_irqrestore(&host->irq_lock, irqflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) static void dw_mci_dto_timer(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) struct dw_mci *host = from_timer(host, t, dto_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) unsigned long irqflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) u32 pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) spin_lock_irqsave(&host->irq_lock, irqflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) /*
	 * The DTO timer is much longer than the CTO timer, so it's even less
	 * likely that we'll hit these cases, but it pays to be paranoid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) pending = mci_readl(host, MINTSTS); /* read-only mask reg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) if (pending & SDMMC_INT_DATA_OVER) {
		/* The interrupt should fire; no need to act, but we can warn */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) dev_warn(host->dev, "Unexpected data interrupt latency\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) if (test_bit(EVENT_DATA_COMPLETE, &host->pending_events)) {
		/* Presumably the interrupt handler couldn't delete the timer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) dev_warn(host->dev, "DTO timeout when already completed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) * Continued paranoia to make sure we're in the state we expect.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) * This paranoia isn't really justified but it seems good to be safe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) switch (host->state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) case STATE_SENDING_DATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) case STATE_DATA_BUSY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) /*
		 * If the DTO interrupt does NOT arrive while we are in a
		 * data-sending state, notify the driver to terminate the
		 * current transfer and report a data timeout to the core.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) host->data_status = SDMMC_INT_DRTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) set_bit(EVENT_DATA_ERROR, &host->pending_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) tasklet_schedule(&host->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) dev_warn(host->dev, "Unexpected data timeout, state %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) host->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) spin_unlock_irqrestore(&host->irq_lock, irqflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) #ifdef CONFIG_OF
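^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226)  * dw_mci_parse_dt() below reads the generic dw_mmc properties from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227)  * the device tree. A minimal illustrative node (the compatible
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228)  * string, unit address and values are hypothetical examples, not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229)  * requirements):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231)  *	mmc@fe2b0000 {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232)  *		compatible = "rockchip,rk3588-dw-mshc";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233)  *		fifo-depth = <0x100>;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234)  *		card-detect-delay = <200>;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235)  *		clock-frequency = <200000000>;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236)  *		fifo-watermark-aligned;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237)  *		resets = <&cru SRST_SDMMC>;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238)  *		reset-names = "reset";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239)  *	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240)  */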
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) struct dw_mci_board *pdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) struct device *dev = host->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) const struct dw_mci_drv_data *drv_data = host->drv_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) u32 clock_frequency;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) if (!pdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) /* Find the reset controller, if one exists */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) pdata->rstc = devm_reset_control_get_optional_exclusive(dev, "reset");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) if (IS_ERR(pdata->rstc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) if (PTR_ERR(pdata->rstc) == -EPROBE_DEFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) return ERR_PTR(-EPROBE_DEFER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) if (device_property_read_u32(dev, "fifo-depth", &pdata->fifo_depth))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) dev_info(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) "fifo-depth property not found, using value of FIFOTH register as default\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) device_property_read_u32(dev, "card-detect-delay",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) &pdata->detect_delay_ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) device_property_read_u32(dev, "data-addr", &host->data_addr_override);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) if (device_property_present(dev, "fifo-watermark-aligned"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) host->wm_aligned = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) if (!device_property_read_u32(dev, "clock-frequency", &clock_frequency))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) pdata->bus_hz = clock_frequency;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) if (drv_data && drv_data->parse_dt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) ret = drv_data->parse_dt(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) return pdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) #else /* CONFIG_OF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) #endif /* CONFIG_OF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) static void dw_mci_enable_cd(struct dw_mci *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) unsigned long irqflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) u32 temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) * There is no need for the CD interrupt if the slot has a working
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) * CD GPIO, or if broken card detection forces the core to poll.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) if (host->slot->mmc->caps & MMC_CAP_NEEDS_POLL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) if (mmc_gpio_get_cd(host->slot->mmc) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) spin_lock_irqsave(&host->irq_lock, irqflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) temp = mci_readl(host, INTMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) temp |= SDMMC_INT_CD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) mci_writel(host, INTMASK, temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) spin_unlock_irqrestore(&host->irq_lock, irqflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295)
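^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297)  * dw_mci_probe()/dw_mci_remove() are the entry points used by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298)  * platform glue drivers. A minimal sketch of a glue probe(), modeled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299)  * loosely on dw_mmc-pltfm.c (error handling trimmed; an illustration,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300)  * not a drop-in, and foo_probe is a made-up name):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302)  *	static int foo_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303)  *	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304)  *		struct dw_mci *host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306)  *		host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307)  *		host->irq = platform_get_irq(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308)  *		host->dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309)  *		host->irq_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310)  *		host->pdata = pdev->dev.platform_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311)  *		host->regs = devm_platform_ioremap_resource(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312)  *		platform_set_drvdata(pdev, host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313)  *		return dw_mci_probe(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314)  *	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315)  */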
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) int dw_mci_probe(struct dw_mci *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) const struct dw_mci_drv_data *drv_data = host->drv_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) int width, i, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) u32 fifo_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) if (!host->pdata) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) host->pdata = dw_mci_parse_dt(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) if (IS_ERR(host->pdata))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) return dev_err_probe(host->dev, PTR_ERR(host->pdata),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) "platform data not available\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) host->biu_clk = devm_clk_get(host->dev, "biu");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) if (IS_ERR(host->biu_clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) dev_dbg(host->dev, "biu clock not available\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) ret = clk_prepare_enable(host->biu_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) dev_err(host->dev, "failed to enable biu clock\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) #ifdef CONFIG_ROCKCHIP_THUNDER_BOOT
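^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322)  * With Thunder Boot the bootloader may still own this controller when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323)  * the kernel takes over. For an instance that serves neither SD nor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324)  * SDIO, wait for the busy/FSM bits polled below in SDMMC_STATUS and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325)  * SDMMC_IDSTS to clear before assuming the controller is ours.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326)  */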
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) if (device_property_read_bool(host->dev, "no-sd") &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) device_property_read_bool(host->dev, "no-sdio")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) if (readl_poll_timeout(host->regs + SDMMC_STATUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) fifo_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) !(fifo_size & (BIT(10) | GENMASK(7, 4))),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) 0, 500 * USEC_PER_MSEC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) dev_err(host->dev, "Controller is occupied!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) if (readl_poll_timeout(host->regs + SDMMC_IDSTS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) fifo_size, !(fifo_size & GENMASK(16, 13)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) 0, 500 * USEC_PER_MSEC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) dev_err(host->dev, "DMA is still running!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) BUG_ON(mci_readl(host, RINTSTS) & DW_MCI_ERROR_FLAGS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) host->ciu_clk = devm_clk_get(host->dev, "ciu");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) if (IS_ERR(host->ciu_clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) dev_dbg(host->dev, "ciu clock not available\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) host->bus_hz = host->pdata->bus_hz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) ret = clk_prepare_enable(host->ciu_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) dev_err(host->dev, "failed to enable ciu clock\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) goto err_clk_biu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) if (host->pdata->bus_hz) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) dev_warn(host->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) "Unable to set bus rate to %uHz\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) host->pdata->bus_hz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) host->bus_hz = clk_get_rate(host->ciu_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) if (!host->bus_hz) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) dev_err(host->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) "Platform data must supply bus speed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) goto err_clk_ciu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365)
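^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) /* Pulse the optional reset line to bring the IP to a known state */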
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) if (!IS_ERR(host->pdata->rstc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) reset_control_assert(host->pdata->rstc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) usleep_range(10, 50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) reset_control_deassert(host->pdata->rstc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) if (drv_data && drv_data->init) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) ret = drv_data->init(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) dev_err(host->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) "implementation specific init failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) goto err_clk_ciu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380)
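^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382)  * These software timers back up the hardware interrupts: cmd11_timer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383)  * bounds the CMD11 voltage-switch sequence, cto_timer the command-done
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384)  * interrupt, and dto_timer the data-over interrupt (see the timeout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385)  * handlers above).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386)  */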
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) timer_setup(&host->cmd11_timer, dw_mci_cmd11_timer, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) timer_setup(&host->cto_timer, dw_mci_cto_timer, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) timer_setup(&host->dto_timer, dw_mci_dto_timer, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) if (host->need_xfer_timer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) timer_setup(&host->xfer_timer, dw_mci_xfer_timer, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) spin_lock_init(&host->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) spin_lock_init(&host->irq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) INIT_LIST_HEAD(&host->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) * Get the host data width - this assumes that HCON has been set with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) * the correct values.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) i = SDMMC_GET_HDATA_WIDTH(mci_readl(host, HCON));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) if (!i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) host->push_data = dw_mci_push_data16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) host->pull_data = dw_mci_pull_data16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) width = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) host->data_shift = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) } else if (i == 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) host->push_data = dw_mci_push_data64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) host->pull_data = dw_mci_pull_data64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) width = 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) host->data_shift = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) /* Any other value is reserved: warn and fall back to 32-bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) WARN(i != 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) "HCON reports a reserved host data width!\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) "Defaulting to 32-bit access.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) host->push_data = dw_mci_push_data32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) host->pull_data = dw_mci_pull_data32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) width = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) host->data_shift = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) }
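^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417)  * Summary of the H_DATA_WIDTH encoding handled above (data_shift is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418)  * log2 of the FIFO access width in bytes):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420)  *	HCON field	FIFO access	data_shift
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421)  *	0		16 bit		1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422)  *	1		32 bit		2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423)  *	2		64 bit		3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424)  */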
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) /* Reset all blocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) goto err_clk_ciu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) host->dma_ops = host->pdata->dma_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) dw_mci_init_dma(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) /* Clear the interrupts for the host controller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) mci_writel(host, RINTSTS, 0xFFFFFFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) mci_writel(host, INTMASK, 0); /* disable all mmc interrupts first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) /* Put in max timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) mci_writel(host, TMOUT, 0xFFFFFFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) * FIFO threshold settings: RX_WMark = fifo_size / 2 - 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) * TX_WMark = fifo_size / 2, DMA multiple-transaction size (MSIZE) = 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) if (!host->pdata->fifo_depth) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) * have been overwritten by the bootloader, just like we're
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) * about to do, so if you know the value for your hardware, you
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) * should put it in the platform data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) fifo_size = mci_readl(host, FIFOTH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
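^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447)  * RX_WMark is the FIFOTH[27:16] field; it powers up holding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448)  * FIFO_DEPTH - 1, hence the +1 above.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449)  */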
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) fifo_size = host->pdata->fifo_depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) host->fifo_depth = fifo_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) host->fifoth_val =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) mci_writel(host, FIFOTH, host->fifoth_val);
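^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454)  * Worked example: a 256-entry FIFO yields MSIZE = 8 (encoding 0x2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455)  * RX_WMark = 127 and TX_WMark = 128.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456)  */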
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) /* disable clock to CIU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) mci_writel(host, CLKENA, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) mci_writel(host, CLKSRC, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) * The DATA register offset changed in the 2.40a spec, so check the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) * version ID and pick the matching offset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) dev_info(host->dev, "Version ID is %04x\n", host->verid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) if (host->data_addr_override)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) host->fifo_reg = host->regs + host->data_addr_override;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) else if (host->verid < DW_MMC_240A)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) host->fifo_reg = host->regs + DATA_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) host->fifo_reg = host->regs + DATA_240A_OFFSET;
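^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) /* Per dw_mmc.h, DATA_OFFSET is 0x100 and DATA_240A_OFFSET is 0x200 */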
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) host->irq_flags, "dw-mci", host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) goto err_dmaunmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) * Enable interrupts for command done, data over, data empty, receive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) * ready, plus errors such as transmit/receive timeouts and CRC errors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) SDMMC_INT_TXDR | SDMMC_INT_RXDR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) DW_MCI_ERROR_FLAGS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) /* Enable mci interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) dev_info(host->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) "DW MMC controller at irq %d, %d bit host data width, %u deep fifo\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) host->irq, width, fifo_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) /* We need at least one slot to succeed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) ret = dw_mci_init_slot(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) dev_dbg(host->dev, "slot init failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) goto err_dmaunmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) if (host->is_rv1106_sd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) #if IS_ENABLED(CONFIG_CPU_RV1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) g_sdmmc_ispvicap_lock = &host->lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) /* Select IDMAC interface */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) fifo_size = mci_readl(host, CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) fifo_size |= SDMMC_CTRL_USE_IDMAC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) mci_writel(host, CTRL, fifo_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) fifo_size = mci_readl(host, INTMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) fifo_size &= ~SDMMC_INT_HTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) mci_writel(host, INTMASK, fifo_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) host->slot->mmc->caps &= ~(MMC_CAP_UHS_DDR50 | MMC_CAP_UHS_SDR104 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) MMC_CAP_UHS_SDR12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) /* Now that the slot is set up, we can enable card detect */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) dw_mci_enable_cd(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) err_dmaunmap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) if (host->use_dma && host->dma_ops->exit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) host->dma_ops->exit(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) if (!IS_ERR(host->pdata->rstc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) reset_control_assert(host->pdata->rstc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) err_clk_ciu:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) clk_disable_unprepare(host->ciu_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) err_clk_biu:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) clk_disable_unprepare(host->biu_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) EXPORT_SYMBOL(dw_mci_probe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) void dw_mci_remove(struct dw_mci *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) dev_dbg(host->dev, "remove slot\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) if (host->slot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) dw_mci_cleanup_slot(host->slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) mci_writel(host, RINTSTS, 0xFFFFFFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) mci_writel(host, INTMASK, 0); /* disable all mmc interrupts first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) /* disable clock to CIU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) mci_writel(host, CLKENA, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) mci_writel(host, CLKSRC, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) if (host->use_dma && host->dma_ops->exit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) host->dma_ops->exit(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) if (!IS_ERR(host->pdata->rstc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) reset_control_assert(host->pdata->rstc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) clk_disable_unprepare(host->ciu_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) clk_disable_unprepare(host->biu_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) EXPORT_SYMBOL(dw_mci_remove);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) #ifdef CONFIG_PM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) int dw_mci_runtime_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) struct dw_mci *host = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) if (host->use_dma && host->dma_ops->exit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) host->dma_ops->exit(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) clk_disable_unprepare(host->ciu_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574)
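^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576)  * Only gate the BIU clock when card detect does not depend on it,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577)  * i.e. when a CD GPIO is in use or the card is non-removable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578)  */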
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) if (host->slot &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) (mmc_can_gpio_cd(host->slot->mmc) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) !mmc_card_is_removable(host->slot->mmc)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) clk_disable_unprepare(host->biu_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) EXPORT_SYMBOL(dw_mci_runtime_suspend);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) int dw_mci_runtime_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) struct dw_mci *host = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) if (host->slot &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) (mmc_can_gpio_cd(host->slot->mmc) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) !mmc_card_is_removable(host->slot->mmc))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) ret = clk_prepare_enable(host->biu_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) ret = clk_prepare_enable(host->ciu_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) clk_disable_unprepare(host->ciu_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) if (host->use_dma && host->dma_ops->init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) host->dma_ops->init(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) * Restore the initial value of the FIFOTH register, and invalidate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) * prev_blksz so the FIFO threshold is recalculated on the next request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) mci_writel(host, FIFOTH, host->fifoth_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) host->prev_blksz = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) /* Put in max timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) mci_writel(host, TMOUT, 0xFFFFFFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) mci_writel(host, RINTSTS, 0xFFFFFFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) SDMMC_INT_TXDR | SDMMC_INT_RXDR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) DW_MCI_ERROR_FLAGS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) if (host->is_rv1106_sd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) /* Select IDMAC interface */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) ret = mci_readl(host, CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) ret |= SDMMC_CTRL_USE_IDMAC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) mci_writel(host, CTRL, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) ret = mci_readl(host, INTMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) ret &= ~SDMMC_INT_HTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) mci_writel(host, INTMASK, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) if (host->slot->mmc->pm_flags & MMC_PM_KEEP_POWER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) dw_mci_set_ios(host->slot->mmc, &host->slot->mmc->ios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) /* Force setup bus to guarantee available clock output */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) dw_mci_setup_bus(host->slot, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) /* Re-enable SDIO interrupts. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) if (sdio_irq_claimed(host->slot->mmc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) __dw_mci_enable_sdio_irq(host->slot, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) /* Now that the slot is set up, we can enable card detect */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) dw_mci_enable_cd(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) if (host->slot &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) (mmc_can_gpio_cd(host->slot->mmc) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) !mmc_card_is_removable(host->slot->mmc)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) clk_disable_unprepare(host->biu_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) EXPORT_SYMBOL(dw_mci_runtime_resume);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) #endif /* CONFIG_PM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) static int __init dw_mci_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) pr_info("Synopsys DesignWare Multimedia Card Interface Driver\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) static void __exit dw_mci_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) module_init(dw_mci_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) module_exit(dw_mci_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) MODULE_AUTHOR("NXP Semiconductor VietNam");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) MODULE_AUTHOR("Imagination Technologies Ltd");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) MODULE_LICENSE("GPL v2");