^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Copyright (C) 2013, Imagination Technologies
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * JZ4740 SD/MMC controller driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/bitops.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/clk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/dmaengine.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/err.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/mmc/host.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/mmc/slot-gpio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/of_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/pinctrl/consumer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/scatterlist.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <asm/cacheflush.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #define JZ_REG_MMC_STRPCL 0x00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #define JZ_REG_MMC_STATUS 0x04
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #define JZ_REG_MMC_CLKRT 0x08
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #define JZ_REG_MMC_CMDAT 0x0C
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #define JZ_REG_MMC_RESTO 0x10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #define JZ_REG_MMC_RDTO 0x14
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #define JZ_REG_MMC_BLKLEN 0x18
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #define JZ_REG_MMC_NOB 0x1C
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #define JZ_REG_MMC_SNOB 0x20
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #define JZ_REG_MMC_IMASK 0x24
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #define JZ_REG_MMC_IREG 0x28
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #define JZ_REG_MMC_CMD 0x2C
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #define JZ_REG_MMC_ARG 0x30
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #define JZ_REG_MMC_RESP_FIFO 0x34
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #define JZ_REG_MMC_RXFIFO 0x38
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) #define JZ_REG_MMC_TXFIFO 0x3C
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) #define JZ_REG_MMC_LPM 0x40
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) #define JZ_REG_MMC_DMAC 0x44
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) #define JZ_MMC_STRPCL_EXIT_MULTIPLE BIT(7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) #define JZ_MMC_STRPCL_EXIT_TRANSFER BIT(6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) #define JZ_MMC_STRPCL_START_READWAIT BIT(5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) #define JZ_MMC_STRPCL_STOP_READWAIT BIT(4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) #define JZ_MMC_STRPCL_RESET BIT(3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) #define JZ_MMC_STRPCL_START_OP BIT(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) #define JZ_MMC_STRPCL_CLOCK_CONTROL (BIT(1) | BIT(0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) #define JZ_MMC_STRPCL_CLOCK_STOP BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) #define JZ_MMC_STRPCL_CLOCK_START BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) #define JZ_MMC_STATUS_IS_RESETTING BIT(15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) #define JZ_MMC_STATUS_SDIO_INT_ACTIVE BIT(14)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) #define JZ_MMC_STATUS_PRG_DONE BIT(13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) #define JZ_MMC_STATUS_DATA_TRAN_DONE BIT(12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) #define JZ_MMC_STATUS_END_CMD_RES BIT(11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) #define JZ_MMC_STATUS_DATA_FIFO_AFULL BIT(10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) #define JZ_MMC_STATUS_IS_READWAIT BIT(9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) #define JZ_MMC_STATUS_CLK_EN BIT(8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) #define JZ_MMC_STATUS_DATA_FIFO_FULL BIT(7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) #define JZ_MMC_STATUS_DATA_FIFO_EMPTY BIT(6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) #define JZ_MMC_STATUS_CRC_RES_ERR BIT(5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) #define JZ_MMC_STATUS_CRC_READ_ERROR BIT(4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) #define JZ_MMC_STATUS_TIMEOUT_WRITE BIT(3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) #define JZ_MMC_STATUS_CRC_WRITE_ERROR BIT(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) #define JZ_MMC_STATUS_TIMEOUT_RES BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) #define JZ_MMC_STATUS_TIMEOUT_READ BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) #define JZ_MMC_STATUS_READ_ERROR_MASK (BIT(4) | BIT(0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) #define JZ_MMC_STATUS_WRITE_ERROR_MASK (BIT(3) | BIT(2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) #define JZ_MMC_CMDAT_IO_ABORT BIT(11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) #define JZ_MMC_CMDAT_BUS_WIDTH_4BIT BIT(10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) #define JZ_MMC_CMDAT_BUS_WIDTH_8BIT (BIT(10) | BIT(9))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) #define JZ_MMC_CMDAT_BUS_WIDTH_MASK (BIT(10) | BIT(9))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) #define JZ_MMC_CMDAT_DMA_EN BIT(8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) #define JZ_MMC_CMDAT_INIT BIT(7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) #define JZ_MMC_CMDAT_BUSY BIT(6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) #define JZ_MMC_CMDAT_STREAM BIT(5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) #define JZ_MMC_CMDAT_WRITE BIT(4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) #define JZ_MMC_CMDAT_DATA_EN BIT(3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) #define JZ_MMC_CMDAT_RESPONSE_FORMAT (BIT(2) | BIT(1) | BIT(0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) #define JZ_MMC_CMDAT_RSP_R1 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) #define JZ_MMC_CMDAT_RSP_R2 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) #define JZ_MMC_CMDAT_RSP_R3 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) #define JZ_MMC_IRQ_SDIO BIT(7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) #define JZ_MMC_IRQ_TXFIFO_WR_REQ BIT(6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) #define JZ_MMC_IRQ_RXFIFO_RD_REQ BIT(5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) #define JZ_MMC_IRQ_END_CMD_RES BIT(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) #define JZ_MMC_IRQ_PRG_DONE BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) #define JZ_MMC_IRQ_DATA_TRAN_DONE BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) #define JZ_MMC_DMAC_DMA_SEL BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) #define JZ_MMC_DMAC_DMA_EN BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) #define JZ_MMC_LPM_DRV_RISING BIT(31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) #define JZ_MMC_LPM_DRV_RISING_QTR_PHASE_DLY BIT(31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) #define JZ_MMC_LPM_DRV_RISING_1NS_DLY BIT(30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) #define JZ_MMC_LPM_SMP_RISING_QTR_OR_HALF_PHASE_DLY BIT(29)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) #define JZ_MMC_LPM_LOW_POWER_MODE_EN BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) #define JZ_MMC_CLK_RATE 24000000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) #define JZ_MMC_REQ_TIMEOUT_MS 5000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) enum jz4740_mmc_version {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) JZ_MMC_JZ4740,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) JZ_MMC_JZ4725B,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) JZ_MMC_JZ4760,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) JZ_MMC_JZ4780,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) JZ_MMC_X1000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) enum jz4740_mmc_state {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) JZ4740_MMC_STATE_READ_RESPONSE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) JZ4740_MMC_STATE_TRANSFER_DATA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) JZ4740_MMC_STATE_SEND_STOP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) JZ4740_MMC_STATE_DONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127)
/*
 * The MMC core allows a mmc_request to be prepared while another mmc_request
 * is still in flight. This is done via the pre_req/post_req hooks.
 * This driver uses the pre_req/post_req hooks to map/unmap the mmc_request.
 * Following what other drivers do (sdhci, dw_mmc) we use the following cookie
 * flags to keep track of the mmc_request mapping state.
 *
 * COOKIE_UNMAPPED: the request is not mapped.
 * COOKIE_PREMAPPED: the request was mapped in pre_req,
 * and should be unmapped in post_req.
 * COOKIE_MAPPED: the request was mapped in the irq handler,
 * and should be unmapped before mmc_request_done is called.
 */
enum jz4780_cookie {
	COOKIE_UNMAPPED = 0,
	COOKIE_PREMAPPED,
	COOKIE_MAPPED,
};

struct jz4740_mmc_host {
	struct mmc_host *mmc;
	struct platform_device *pdev;
	struct clk *clk;

	enum jz4740_mmc_version version;

	int irq;
	int card_detect_irq;

	void __iomem *base;
	struct resource *mem_res;
	struct mmc_request *req;
	struct mmc_command *cmd;

	unsigned long waiting;

	uint32_t cmdat;

	uint32_t irq_mask;

	spinlock_t lock;

	struct timer_list timeout_timer;
	struct sg_mapping_iter miter;
	enum jz4740_mmc_state state;

	/* DMA support */
	struct dma_chan *dma_rx;
	struct dma_chan *dma_tx;
	bool use_dma;

	/* The DMA trigger level is 8 words, that is to say, the DMA read
	 * trigger fires when the number of data words in MSC_RXFIFO is >= 8,
	 * and the DMA write trigger fires when the number of data words in
	 * MSC_TXFIFO is < 8.
	 */
#define JZ4740_MMC_FIFO_HALF_SIZE 8
};

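/*
 * Register access helpers for the version-dependent register widths: IMASK
 * gets a 32-bit access on the JZ4725B and later SoCs but only a 16-bit one
 * on the JZ4740, while IREG is accessed 32 bits wide from the JZ4780 on.
 */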
static void jz4740_mmc_write_irq_mask(struct jz4740_mmc_host *host,
				      uint32_t val)
{
	if (host->version >= JZ_MMC_JZ4725B)
		return writel(val, host->base + JZ_REG_MMC_IMASK);
	else
		return writew(val, host->base + JZ_REG_MMC_IMASK);
}

static void jz4740_mmc_write_irq_reg(struct jz4740_mmc_host *host,
				     uint32_t val)
{
	if (host->version >= JZ_MMC_JZ4780)
		writel(val, host->base + JZ_REG_MMC_IREG);
	else
		writew(val, host->base + JZ_REG_MMC_IREG);
}

static uint32_t jz4740_mmc_read_irq_reg(struct jz4740_mmc_host *host)
{
	if (host->version >= JZ_MMC_JZ4780)
		return readl(host->base + JZ_REG_MMC_IREG);
	else
		return readw(host->base + JZ_REG_MMC_IREG);
}

/*----------------------------------------------------------------------------*/
/* DMA infrastructure */

static void jz4740_mmc_release_dma_channels(struct jz4740_mmc_host *host)
{
	if (!host->use_dma)
		return;

	dma_release_channel(host->dma_tx);
	dma_release_channel(host->dma_rx);
}

static int jz4740_mmc_acquire_dma_channels(struct jz4740_mmc_host *host)
{
	host->dma_tx = dma_request_chan(mmc_dev(host->mmc), "tx");
	if (IS_ERR(host->dma_tx)) {
		dev_err(mmc_dev(host->mmc), "Failed to get dma_tx channel\n");
		return PTR_ERR(host->dma_tx);
	}

	host->dma_rx = dma_request_chan(mmc_dev(host->mmc), "rx");
	if (IS_ERR(host->dma_rx)) {
		dev_err(mmc_dev(host->mmc), "Failed to get dma_rx channel\n");
		dma_release_channel(host->dma_tx);
		return PTR_ERR(host->dma_rx);
	}

	return 0;
}

static inline struct dma_chan *jz4740_mmc_get_dma_chan(struct jz4740_mmc_host *host,
						       struct mmc_data *data)
{
	return (data->flags & MMC_DATA_READ) ? host->dma_rx : host->dma_tx;
}

static void jz4740_mmc_dma_unmap(struct jz4740_mmc_host *host,
				 struct mmc_data *data)
{
	struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
	enum dma_data_direction dir = mmc_get_dma_dir(data);

	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
	data->host_cookie = COOKIE_UNMAPPED;
}

/* Prepares DMA data for current or next transfer.
 * A request can be in-flight when this is called.
 */
static int jz4740_mmc_prepare_dma_data(struct jz4740_mmc_host *host,
				       struct mmc_data *data,
				       int cookie)
{
	struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
	enum dma_data_direction dir = mmc_get_dma_dir(data);
	int sg_count;

	if (data->host_cookie == COOKIE_PREMAPPED)
		return data->sg_count;

	sg_count = dma_map_sg(chan->device->dev,
			      data->sg,
			      data->sg_len,
			      dir);

	if (sg_count <= 0) {
		dev_err(mmc_dev(host->mmc),
			"Failed to map scatterlist for DMA operation\n");
		return -EINVAL;
	}

	data->sg_count = sg_count;
	data->host_cookie = cookie;

	return data->sg_count;
}

static int jz4740_mmc_start_dma_transfer(struct jz4740_mmc_host *host,
					 struct mmc_data *data)
{
	struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config conf = {
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
		.dst_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
	};
	int sg_count;

	if (data->flags & MMC_DATA_WRITE) {
		conf.direction = DMA_MEM_TO_DEV;
		conf.dst_addr = host->mem_res->start + JZ_REG_MMC_TXFIFO;
	} else {
		conf.direction = DMA_DEV_TO_MEM;
		conf.src_addr = host->mem_res->start + JZ_REG_MMC_RXFIFO;
	}

	sg_count = jz4740_mmc_prepare_dma_data(host, data, COOKIE_MAPPED);
	if (sg_count < 0)
		return sg_count;

	dmaengine_slave_config(chan, &conf);
	desc = dmaengine_prep_slave_sg(chan, data->sg, sg_count,
				       conf.direction,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(mmc_dev(host->mmc),
			"Failed to allocate DMA %s descriptor",
			conf.direction == DMA_MEM_TO_DEV ? "TX" : "RX");
		goto dma_unmap;
	}

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	return 0;

dma_unmap:
	if (data->host_cookie == COOKIE_MAPPED)
		jz4740_mmc_dma_unmap(host, data);
	return -ENOMEM;
}

static void jz4740_mmc_pre_request(struct mmc_host *mmc,
				   struct mmc_request *mrq)
{
	struct jz4740_mmc_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!host->use_dma)
		return;

	data->host_cookie = COOKIE_UNMAPPED;
	if (jz4740_mmc_prepare_dma_data(host, data, COOKIE_PREMAPPED) < 0)
		data->host_cookie = COOKIE_UNMAPPED;
}

static void jz4740_mmc_post_request(struct mmc_host *mmc,
				    struct mmc_request *mrq,
				    int err)
{
	struct jz4740_mmc_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (data && data->host_cookie != COOKIE_UNMAPPED)
		jz4740_mmc_dma_unmap(host, data);

	if (err) {
		struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);

		dmaengine_terminate_all(chan);
	}
}

/*----------------------------------------------------------------------------*/

static void jz4740_mmc_set_irq_enabled(struct jz4740_mmc_host *host,
				       unsigned int irq, bool enabled)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (enabled)
		host->irq_mask &= ~irq;
	else
		host->irq_mask |= irq;

	jz4740_mmc_write_irq_mask(host, host->irq_mask);
	spin_unlock_irqrestore(&host->lock, flags);
}

static void jz4740_mmc_clock_enable(struct jz4740_mmc_host *host,
				    bool start_transfer)
{
	uint16_t val = JZ_MMC_STRPCL_CLOCK_START;

	if (start_transfer)
		val |= JZ_MMC_STRPCL_START_OP;

	writew(val, host->base + JZ_REG_MMC_STRPCL);
}

static void jz4740_mmc_clock_disable(struct jz4740_mmc_host *host)
{
	uint32_t status;
	unsigned int timeout = 1000;

	writew(JZ_MMC_STRPCL_CLOCK_STOP, host->base + JZ_REG_MMC_STRPCL);
	do {
		status = readl(host->base + JZ_REG_MMC_STATUS);
	} while (status & JZ_MMC_STATUS_CLK_EN && --timeout);
}

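/* Soft-reset the controller and wait for the RESETTING status flag to clear. */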
static void jz4740_mmc_reset(struct jz4740_mmc_host *host)
{
	uint32_t status;
	unsigned int timeout = 1000;

	writew(JZ_MMC_STRPCL_RESET, host->base + JZ_REG_MMC_STRPCL);
	udelay(10);
	do {
		status = readl(host->base + JZ_REG_MMC_STATUS);
	} while (status & JZ_MMC_STATUS_IS_RESETTING && --timeout);
}

static void jz4740_mmc_request_done(struct jz4740_mmc_host *host)
{
	struct mmc_request *req;
	struct mmc_data *data;

	req = host->req;
	data = req->data;
	host->req = NULL;

	if (data && data->host_cookie == COOKIE_MAPPED)
		jz4740_mmc_dma_unmap(host, data);
	mmc_request_done(host->mmc, req);
}

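/*
 * Busy-poll IREG for @irq. Returns false if the interrupt fired while
 * polling. If polling gives up, the request timeout timer is armed and the
 * interrupt is unmasked, and true is returned so the caller bails out and
 * lets the interrupt handler finish the transfer later.
 */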
static unsigned int jz4740_mmc_poll_irq(struct jz4740_mmc_host *host,
					unsigned int irq)
{
	unsigned int timeout = 0x800;
	uint32_t status;

	do {
		status = jz4740_mmc_read_irq_reg(host);
	} while (!(status & irq) && --timeout);

	if (timeout == 0) {
		set_bit(0, &host->waiting);
		mod_timer(&host->timeout_timer,
			  jiffies + msecs_to_jiffies(JZ_MMC_REQ_TIMEOUT_MS));
		jz4740_mmc_set_irq_enabled(host, irq, true);
		return true;
	}

	return false;
}

static void jz4740_mmc_transfer_check_state(struct jz4740_mmc_host *host,
					    struct mmc_data *data)
{
	int status;

	status = readl(host->base + JZ_REG_MMC_STATUS);
	if (status & JZ_MMC_STATUS_WRITE_ERROR_MASK) {
		if (status & (JZ_MMC_STATUS_TIMEOUT_WRITE)) {
			host->req->cmd->error = -ETIMEDOUT;
			data->error = -ETIMEDOUT;
		} else {
			host->req->cmd->error = -EIO;
			data->error = -EIO;
		}
	} else if (status & JZ_MMC_STATUS_READ_ERROR_MASK) {
		if (status & (JZ_MMC_STATUS_TIMEOUT_READ)) {
			host->req->cmd->error = -ETIMEDOUT;
			data->error = -ETIMEDOUT;
		} else {
			host->req->cmd->error = -EIO;
			data->error = -EIO;
		}
	}
}

static bool jz4740_mmc_write_data(struct jz4740_mmc_host *host,
				  struct mmc_data *data)
{
	struct sg_mapping_iter *miter = &host->miter;
	void __iomem *fifo_addr = host->base + JZ_REG_MMC_TXFIFO;
	uint32_t *buf;
	bool timeout;
	size_t i, j;

	while (sg_miter_next(miter)) {
		buf = miter->addr;
		i = miter->length / 4;
		j = i / 8;
		i = i & 0x7;
		while (j) {
			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_TXFIFO_WR_REQ);
			if (unlikely(timeout))
				goto poll_timeout;

			writel(buf[0], fifo_addr);
			writel(buf[1], fifo_addr);
			writel(buf[2], fifo_addr);
			writel(buf[3], fifo_addr);
			writel(buf[4], fifo_addr);
			writel(buf[5], fifo_addr);
			writel(buf[6], fifo_addr);
			writel(buf[7], fifo_addr);
			buf += 8;
			--j;
		}
		if (unlikely(i)) {
			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_TXFIFO_WR_REQ);
			if (unlikely(timeout))
				goto poll_timeout;

			while (i) {
				writel(*buf, fifo_addr);
				++buf;
				--i;
			}
		}
		data->bytes_xfered += miter->length;
	}
	sg_miter_stop(miter);

	return false;

poll_timeout:
	miter->consumed = (void *)buf - miter->addr;
	data->bytes_xfered += miter->consumed;
	sg_miter_stop(miter);

	return true;
}

static bool jz4740_mmc_read_data(struct jz4740_mmc_host *host,
				 struct mmc_data *data)
{
	struct sg_mapping_iter *miter = &host->miter;
	void __iomem *fifo_addr = host->base + JZ_REG_MMC_RXFIFO;
	uint32_t *buf;
	uint32_t d;
	uint32_t status;
	size_t i, j;
	unsigned int timeout;

	while (sg_miter_next(miter)) {
		buf = miter->addr;
		i = miter->length;
		j = i / 32;
		i = i & 0x1f;
		while (j) {
			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_RXFIFO_RD_REQ);
			if (unlikely(timeout))
				goto poll_timeout;

			buf[0] = readl(fifo_addr);
			buf[1] = readl(fifo_addr);
			buf[2] = readl(fifo_addr);
			buf[3] = readl(fifo_addr);
			buf[4] = readl(fifo_addr);
			buf[5] = readl(fifo_addr);
			buf[6] = readl(fifo_addr);
			buf[7] = readl(fifo_addr);

			buf += 8;
			--j;
		}

		if (unlikely(i)) {
			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_RXFIFO_RD_REQ);
			if (unlikely(timeout))
				goto poll_timeout;

			while (i >= 4) {
				*buf++ = readl(fifo_addr);
				i -= 4;
			}
			if (unlikely(i > 0)) {
				d = readl(fifo_addr);
				memcpy(buf, &d, i);
			}
		}
		data->bytes_xfered += miter->length;

		/* This can go away once MIPS implements
		 * flush_kernel_dcache_page */
		flush_dcache_page(miter->page);
	}
	sg_miter_stop(miter);

	/* For whatever reason there is sometimes one word more in the FIFO
	 * than requested */
	timeout = 1000;
	status = readl(host->base + JZ_REG_MMC_STATUS);
	while (!(status & JZ_MMC_STATUS_DATA_FIFO_EMPTY) && --timeout) {
		d = readl(fifo_addr);
		status = readl(host->base + JZ_REG_MMC_STATUS);
	}

	return false;

poll_timeout:
	miter->consumed = (void *)buf - miter->addr;
	data->bytes_xfered += miter->consumed;
	sg_miter_stop(miter);

	return true;
}

static void jz4740_mmc_timeout(struct timer_list *t)
{
	struct jz4740_mmc_host *host = from_timer(host, t, timeout_timer);

	if (!test_and_clear_bit(0, &host->waiting))
		return;

	jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_END_CMD_RES, false);

	host->req->cmd->error = -ETIMEDOUT;
	jz4740_mmc_request_done(host);
}

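/*
 * The response FIFO is only 16 bits wide, so the 32-bit words of the command
 * response are assembled from successive halfword reads.
 */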
static void jz4740_mmc_read_response(struct jz4740_mmc_host *host,
				     struct mmc_command *cmd)
{
	int i;
	uint16_t tmp;
	void __iomem *fifo_addr = host->base + JZ_REG_MMC_RESP_FIFO;

	if (cmd->flags & MMC_RSP_136) {
		tmp = readw(fifo_addr);
		for (i = 0; i < 4; ++i) {
			cmd->resp[i] = tmp << 24;
			tmp = readw(fifo_addr);
			cmd->resp[i] |= tmp << 8;
			tmp = readw(fifo_addr);
			cmd->resp[i] |= tmp >> 8;
		}
	} else {
		cmd->resp[0] = readw(fifo_addr) << 24;
		cmd->resp[0] |= readw(fifo_addr) << 8;
		cmd->resp[0] |= readw(fifo_addr) & 0xff;
	}
}

static void jz4740_mmc_send_command(struct jz4740_mmc_host *host,
				    struct mmc_command *cmd)
{
	uint32_t cmdat = host->cmdat;

	host->cmdat &= ~JZ_MMC_CMDAT_INIT;
	jz4740_mmc_clock_disable(host);

	host->cmd = cmd;

	if (cmd->flags & MMC_RSP_BUSY)
		cmdat |= JZ_MMC_CMDAT_BUSY;

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_R1B:
	case MMC_RSP_R1:
		cmdat |= JZ_MMC_CMDAT_RSP_R1;
		break;
	case MMC_RSP_R2:
		cmdat |= JZ_MMC_CMDAT_RSP_R2;
		break;
	case MMC_RSP_R3:
		cmdat |= JZ_MMC_CMDAT_RSP_R3;
		break;
	default:
		break;
	}

	if (cmd->data) {
		cmdat |= JZ_MMC_CMDAT_DATA_EN;
		if (cmd->data->flags & MMC_DATA_WRITE)
			cmdat |= JZ_MMC_CMDAT_WRITE;
		if (host->use_dma) {
			/*
			 * The 4780's MMC controller has integrated DMA ability
			 * in addition to being able to use the external DMA
			 * controller. It moves DMA control bits to a separate
			 * register. The DMA_SEL bit chooses the external
			 * controller over the integrated one. Earlier SoCs
			 * can only use the external controller, and have a
			 * single DMA enable bit in CMDAT.
			 */
			if (host->version >= JZ_MMC_JZ4780) {
				writel(JZ_MMC_DMAC_DMA_EN | JZ_MMC_DMAC_DMA_SEL,
				       host->base + JZ_REG_MMC_DMAC);
			} else {
				cmdat |= JZ_MMC_CMDAT_DMA_EN;
			}
		} else if (host->version >= JZ_MMC_JZ4780) {
			writel(0, host->base + JZ_REG_MMC_DMAC);
		}

		writew(cmd->data->blksz, host->base + JZ_REG_MMC_BLKLEN);
		writew(cmd->data->blocks, host->base + JZ_REG_MMC_NOB);
	}

	writeb(cmd->opcode, host->base + JZ_REG_MMC_CMD);
	writel(cmd->arg, host->base + JZ_REG_MMC_ARG);
	writel(cmdat, host->base + JZ_REG_MMC_CMDAT);

	jz4740_mmc_clock_enable(host, 1);
}

static void jz_mmc_prepare_data_transfer(struct jz4740_mmc_host *host)
{
	struct mmc_command *cmd = host->req->cmd;
	struct mmc_data *data = cmd->data;
	int direction;

	if (data->flags & MMC_DATA_READ)
		direction = SG_MITER_TO_SG;
	else
		direction = SG_MITER_FROM_SG;

	sg_miter_start(&host->miter, data->sg, data->sg_len, direction);
}


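/*
 * Threaded half of the interrupt handler: walks the request state machine
 * (read response -> transfer data -> send stop -> done). Whenever a poll for
 * a completion flag times out, the current state is saved and the handler
 * returns, so the next interrupt can resume the state machine.
 */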
static irqreturn_t jz_mmc_irq_worker(int irq, void *devid)
{
	struct jz4740_mmc_host *host = (struct jz4740_mmc_host *)devid;
	struct mmc_command *cmd = host->req->cmd;
	struct mmc_request *req = host->req;
	struct mmc_data *data = cmd->data;
	bool timeout = false;

	if (cmd->error)
		host->state = JZ4740_MMC_STATE_DONE;

	switch (host->state) {
	case JZ4740_MMC_STATE_READ_RESPONSE:
		if (cmd->flags & MMC_RSP_PRESENT)
			jz4740_mmc_read_response(host, cmd);

		if (!data)
			break;

		jz_mmc_prepare_data_transfer(host);
		fallthrough;

	case JZ4740_MMC_STATE_TRANSFER_DATA:
		if (host->use_dma) {
			/* Use DMA if enabled.
			 * Data transfer direction is defined later by
			 * relying on data flags in
			 * jz4740_mmc_prepare_dma_data() and
			 * jz4740_mmc_start_dma_transfer().
			 */
			timeout = jz4740_mmc_start_dma_transfer(host, data);
			data->bytes_xfered = data->blocks * data->blksz;
		} else if (data->flags & MMC_DATA_READ)
			/* Use PIO if DMA is not enabled.
			 * Data transfer direction was defined before
			 * by relying on data flags in
			 * jz_mmc_prepare_data_transfer().
			 */
			timeout = jz4740_mmc_read_data(host, data);
		else
			timeout = jz4740_mmc_write_data(host, data);

		if (unlikely(timeout)) {
			host->state = JZ4740_MMC_STATE_TRANSFER_DATA;
			break;
		}

		jz4740_mmc_transfer_check_state(host, data);

		timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_DATA_TRAN_DONE);
		if (unlikely(timeout)) {
			host->state = JZ4740_MMC_STATE_SEND_STOP;
			break;
		}
		jz4740_mmc_write_irq_reg(host, JZ_MMC_IRQ_DATA_TRAN_DONE);
		fallthrough;

	case JZ4740_MMC_STATE_SEND_STOP:
		if (!req->stop)
			break;

		jz4740_mmc_send_command(host, req->stop);

		if (mmc_resp_type(req->stop) & MMC_RSP_BUSY) {
			timeout = jz4740_mmc_poll_irq(host,
						      JZ_MMC_IRQ_PRG_DONE);
			if (timeout) {
				host->state = JZ4740_MMC_STATE_DONE;
				break;
			}
		}
		fallthrough;

	case JZ4740_MMC_STATE_DONE:
		break;
	}

	if (!timeout)
		jz4740_mmc_request_done(host);

	return IRQ_HANDLED;
}

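/*
 * Hard interrupt handler: acknowledges the raised interrupt bits, forwards
 * SDIO card interrupts to the core, records response timeout and CRC errors
 * from the status register, and wakes the threaded handler to continue the
 * in-flight request.
 */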
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) static irqreturn_t jz_mmc_irq(int irq, void *devid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) struct jz4740_mmc_host *host = devid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) struct mmc_command *cmd = host->cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) uint32_t irq_reg, status, tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) status = readl(host->base + JZ_REG_MMC_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) irq_reg = jz4740_mmc_read_irq_reg(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) tmp = irq_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) irq_reg &= ~host->irq_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) tmp &= ~(JZ_MMC_IRQ_TXFIFO_WR_REQ | JZ_MMC_IRQ_RXFIFO_RD_REQ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) JZ_MMC_IRQ_PRG_DONE | JZ_MMC_IRQ_DATA_TRAN_DONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) if (tmp != irq_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) jz4740_mmc_write_irq_reg(host, tmp & ~irq_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
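	/*
	 * An SDIO card interrupt is not part of the request state machine:
	 * ack it and hand it straight to the MMC core.
	 */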
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) if (irq_reg & JZ_MMC_IRQ_SDIO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) jz4740_mmc_write_irq_reg(host, JZ_MMC_IRQ_SDIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) mmc_signal_sdio_irq(host->mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) irq_reg &= ~JZ_MMC_IRQ_SDIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) if (host->req && cmd && irq_reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) if (test_and_clear_bit(0, &host->waiting)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) del_timer(&host->timeout_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) if (status & JZ_MMC_STATUS_TIMEOUT_RES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) cmd->error = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) } else if (status & JZ_MMC_STATUS_CRC_RES_ERR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) cmd->error = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) } else if (status & (JZ_MMC_STATUS_CRC_READ_ERROR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) JZ_MMC_STATUS_CRC_WRITE_ERROR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) if (cmd->data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) cmd->data->error = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) cmd->error = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
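			/*
			 * Disable and acknowledge the interrupts that fired,
			 * then let the threaded worker drive the rest of the
			 * request from host->state.
			 */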
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) jz4740_mmc_set_irq_enabled(host, irq_reg, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) jz4740_mmc_write_irq_reg(host, irq_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) return IRQ_WAKE_THREAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) static int jz4740_mmc_set_clock_rate(struct jz4740_mmc_host *host, int rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) int div = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) int real_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) jz4740_mmc_clock_disable(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) clk_set_rate(host->clk, host->mmc->f_max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) real_rate = clk_get_rate(host->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
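	/*
	 * CLKRT divides the controller clock by 2^div with div in 0..7: pick
	 * the smallest divider that brings the clock at or below the
	 * requested rate.  For example, with a 24 MHz controller clock and a
	 * requested 400 kHz card clock the loop ends with div = 6, i.e.
	 * 24 MHz / 64 = 375 kHz.
	 */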
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) while (real_rate > rate && div < 7) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) ++div;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) real_rate >>= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) writew(div, host->base + JZ_REG_MMC_CLKRT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
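	/*
	 * Above 25 MHz (the full-speed limit) the newer controller revisions
	 * want their low-power-mode/clock-phase register configured: the
	 * X1000 uses quarter-phase drive and sample delays, the JZ4760 drives
	 * on the rising edge, and the JZ4725B only has the plain low-power
	 * enable bit.  Nothing is written on the original JZ4740.
	 */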
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) if (real_rate > 25000000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) if (host->version >= JZ_MMC_X1000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) writel(JZ_MMC_LPM_DRV_RISING_QTR_PHASE_DLY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) JZ_MMC_LPM_SMP_RISING_QTR_OR_HALF_PHASE_DLY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) JZ_MMC_LPM_LOW_POWER_MODE_EN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) host->base + JZ_REG_MMC_LPM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) } else if (host->version >= JZ_MMC_JZ4760) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) writel(JZ_MMC_LPM_DRV_RISING |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) JZ_MMC_LPM_LOW_POWER_MODE_EN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) host->base + JZ_REG_MMC_LPM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) } else if (host->version >= JZ_MMC_JZ4725B)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) writel(JZ_MMC_LPM_LOW_POWER_MODE_EN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) host->base + JZ_REG_MMC_LPM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) return real_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
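/*
 * Start a request: clear any stale interrupt flags, enable the
 * end-of-command-response interrupt, arm the software timeout timer and
 * send the first command.  Everything after that is driven from
 * jz_mmc_irq() and jz_mmc_irq_worker() via host->state.
 */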
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) static void jz4740_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) struct jz4740_mmc_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) host->req = req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) jz4740_mmc_write_irq_reg(host, ~0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_END_CMD_RES, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) host->state = JZ4740_MMC_STATE_READ_RESPONSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) set_bit(0, &host->waiting);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) mod_timer(&host->timeout_timer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) jiffies + msecs_to_jiffies(JZ_MMC_REQ_TIMEOUT_MS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) jz4740_mmc_send_command(host, req->cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) static void jz4740_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) struct jz4740_mmc_host *host = mmc_priv(mmc);

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) 	if (ios->clock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) jz4740_mmc_set_clock_rate(host, ios->clock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
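	/*
	 * On MMC_POWER_UP: reset the controller, switch on the card supply
	 * (if one is provided) and the controller clock, and set the INIT
	 * flag so the controller emits its card-initialization clock
	 * sequence before the next command.  Powering off undoes the supply
	 * and clock again.
	 */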
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) switch (ios->power_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) case MMC_POWER_UP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) jz4740_mmc_reset(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) if (!IS_ERR(mmc->supply.vmmc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) host->cmdat |= JZ_MMC_CMDAT_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) clk_prepare_enable(host->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) case MMC_POWER_ON:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) if (!IS_ERR(mmc->supply.vmmc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) clk_disable_unprepare(host->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) switch (ios->bus_width) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) case MMC_BUS_WIDTH_1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) case MMC_BUS_WIDTH_4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) host->cmdat |= JZ_MMC_CMDAT_BUS_WIDTH_4BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) case MMC_BUS_WIDTH_8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) host->cmdat |= JZ_MMC_CMDAT_BUS_WIDTH_8BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) static void jz4740_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) struct jz4740_mmc_host *host = mmc_priv(mmc);

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) 	jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_SDIO, enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) static const struct mmc_host_ops jz4740_mmc_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) .request = jz4740_mmc_request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) .pre_req = jz4740_mmc_pre_request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) .post_req = jz4740_mmc_post_request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) .set_ios = jz4740_mmc_set_ios,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) .get_ro = mmc_gpio_get_ro,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) .get_cd = mmc_gpio_get_cd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) .enable_sdio_irq = jz4740_mmc_enable_sdio_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) static const struct of_device_id jz4740_mmc_of_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) 	{ .compatible = "ingenic,jz4740-mmc", .data = (void *)JZ_MMC_JZ4740 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) 	{ .compatible = "ingenic,jz4725b-mmc", .data = (void *)JZ_MMC_JZ4725B },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) 	{ .compatible = "ingenic,jz4760-mmc", .data = (void *)JZ_MMC_JZ4760 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) 	{ .compatible = "ingenic,jz4780-mmc", .data = (void *)JZ_MMC_JZ4780 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) 	{ .compatible = "ingenic,x1000-mmc", .data = (void *)JZ_MMC_X1000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) {},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) MODULE_DEVICE_TABLE(of, jz4740_mmc_of_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) static int jz4740_mmc_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) struct mmc_host *mmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) struct jz4740_mmc_host *host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) const struct of_device_id *match;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) mmc = mmc_alloc_host(sizeof(struct jz4740_mmc_host), &pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) if (!mmc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) dev_err(&pdev->dev, "Failed to alloc mmc host structure\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) match = of_match_device(jz4740_mmc_of_match, &pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) if (match) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) host->version = (enum jz4740_mmc_version)match->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) /* JZ4740 should be the only one using legacy probe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) host->version = JZ_MMC_JZ4740;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) ret = mmc_of_parse(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) dev_err_probe(&pdev->dev, ret, "could not parse device properties\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) goto err_free_host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) mmc_regulator_get_supply(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) host->irq = platform_get_irq(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) if (host->irq < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) ret = host->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) goto err_free_host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) host->clk = devm_clk_get(&pdev->dev, "mmc");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) if (IS_ERR(host->clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) ret = PTR_ERR(host->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) dev_err(&pdev->dev, "Failed to get mmc clock\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) goto err_free_host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) host->mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) host->base = devm_ioremap_resource(&pdev->dev, host->mem_res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) if (IS_ERR(host->base)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) ret = PTR_ERR(host->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) dev_err(&pdev->dev, "Failed to ioremap base memory\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) goto err_free_host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) mmc->ops = &jz4740_mmc_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) if (!mmc->f_max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) mmc->f_max = JZ_MMC_CLK_RATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) mmc->f_min = mmc->f_max / 128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	 * We use a fixed timeout of 5s, so inform the core about it. A
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	 * future improvement should instead respect cmd->busy_timeout.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) mmc->max_busy_timeout = JZ_MMC_REQ_TIMEOUT_MS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
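	/*
	 * The transfer limits follow from the register fields used to
	 * program a transfer (BLKLEN/NOB): up to 1023 bytes per block and up
	 * to 32767 blocks per request.
	 */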
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) mmc->max_blk_size = (1 << 10) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) mmc->max_blk_count = (1 << 15) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) mmc->max_segs = 128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) mmc->max_seg_size = mmc->max_req_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) host->mmc = mmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) host->pdev = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) spin_lock_init(&host->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) host->irq_mask = ~0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) jz4740_mmc_reset(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
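	/*
	 * jz_mmc_irq() runs in hard interrupt context and wakes the threaded
	 * handler, jz_mmc_irq_worker(), which does the actual (and possibly
	 * sleeping) request processing.
	 */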
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) ret = request_threaded_irq(host->irq, jz_mmc_irq, jz_mmc_irq_worker, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) dev_name(&pdev->dev), host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) dev_err(&pdev->dev, "Failed to request irq: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) goto err_free_host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) jz4740_mmc_clock_disable(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) timer_setup(&host->timeout_timer, jz4740_mmc_timeout, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
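	/*
	 * DMA channels are optional: a probe deferral is propagated, any
	 * other failure simply means we fall back to PIO transfers.
	 */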
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) ret = jz4740_mmc_acquire_dma_channels(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) if (ret == -EPROBE_DEFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) goto err_free_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) host->use_dma = !ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) platform_set_drvdata(pdev, host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) ret = mmc_add_host(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) dev_err(&pdev->dev, "Failed to add mmc host: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) goto err_release_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) dev_info(&pdev->dev, "Ingenic SD/MMC card driver registered\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) dev_info(&pdev->dev, "Using %s, %d-bit mode\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) host->use_dma ? "DMA" : "PIO",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) (mmc->caps & MMC_CAP_8_BIT_DATA) ? 8 :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) ((mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) err_release_dma:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) if (host->use_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) jz4740_mmc_release_dma_channels(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) err_free_irq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) free_irq(host->irq, host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) err_free_host:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) mmc_free_host(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) static int jz4740_mmc_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) struct jz4740_mmc_host *host = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) del_timer_sync(&host->timeout_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) jz4740_mmc_set_irq_enabled(host, 0xff, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) jz4740_mmc_reset(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) mmc_remove_host(host->mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) free_irq(host->irq, host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) if (host->use_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) jz4740_mmc_release_dma_channels(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) mmc_free_host(host->mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
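/*
 * System sleep only switches the pins to their sleep state; the MMC core
 * takes care of suspending the card itself.
 */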
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) static int __maybe_unused jz4740_mmc_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) return pinctrl_pm_select_sleep_state(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) static int __maybe_unused jz4740_mmc_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) return pinctrl_select_default_state(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) static SIMPLE_DEV_PM_OPS(jz4740_mmc_pm_ops, jz4740_mmc_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) jz4740_mmc_resume);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) static struct platform_driver jz4740_mmc_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) .probe = jz4740_mmc_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) .remove = jz4740_mmc_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) .name = "jz4740-mmc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) .probe_type = PROBE_PREFER_ASYNCHRONOUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) .of_match_table = of_match_ptr(jz4740_mmc_of_match),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) .pm = pm_ptr(&jz4740_mmc_pm_ops),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) module_platform_driver(jz4740_mmc_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) MODULE_DESCRIPTION("JZ4740 SD/MMC controller driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");