// SPDX-License-Identifier: GPL-2.0
/*
 * MMCIF eMMC driver.
 *
 * Copyright (C) 2010 Renesas Solutions Corp.
 * Yusuke Goda <yusuke.goda.sx@renesas.com>
 */

/*
 * The MMCIF driver processes MMC requests asynchronously, as required by the
 * Linux MMC API.
 *
 * A request is processed in up to 3 stages: command, optional data, and
 * optional stop. To achieve asynchronous processing each of these stages is
 * split into two halves: a top and a bottom half. The top half initialises
 * the hardware, installs a timeout handler to handle completion timeouts, and
 * returns. In case of the command stage this immediately returns control to
 * the caller, leaving all further processing to run asynchronously. All
 * further request processing is performed by the bottom halves.
 *
 * The bottom half further consists of a "hard" IRQ handler, an IRQ handler
 * thread, a DMA completion callback (if DMA is used), a timeout work, and
 * request- and stage-specific handler methods.
 *
 * Each bottom half run begins with either a hardware interrupt, a DMA
 * callback invocation, or a timeout work run. In case of an error or a
 * successful processing completion, the MMC core is informed and the request
 * processing is finished. In case processing has to continue, i.e., if data
 * has to be read from or written to the card, or if a stop command has to be
 * sent, the next top half is called, which performs the necessary hardware
 * handling and reschedules the timeout work. This returns the driver state
 * machine into the bottom half waiting state.
 */
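
/*
 * Illustrative sketch of the resulting flow for a read request with a stop
 * command (the stages below refer to the description above, not to specific
 * function names):
 *
 *   request()        - command top half: program hardware, arm timeout, return
 *     IRQ/thread     - command bottom half: collect the response
 *   data top half    - enable buffer-read interrupts, re-arm the timeout
 *     IRQ/thread/DMA - data bottom half: PIO copy or DMA completion
 *   stop top half    - issue the stop command, re-arm the timeout
 *     IRQ/thread     - stop bottom half: mmc_request_done()
 */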

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mmc/card.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sh_mmcif.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/spinlock.h>
#include <linux/module.h>

#define DRIVER_NAME	"sh_mmcif"

/* CE_CMD_SET */
#define CMD_MASK		0x3f000000
#define CMD_SET_RTYP_NO		((0 << 23) | (0 << 22))
#define CMD_SET_RTYP_6B		((0 << 23) | (1 << 22)) /* R1/R1b/R3/R4/R5 */
#define CMD_SET_RTYP_17B	((1 << 23) | (0 << 22)) /* R2 */
#define CMD_SET_RBSY		(1 << 21) /* R1b */
#define CMD_SET_CCSEN		(1 << 20)
#define CMD_SET_WDAT		(1 << 19) /* 1: on data, 0: no data */
#define CMD_SET_DWEN		(1 << 18) /* 1: write, 0: read */
#define CMD_SET_CMLTE		(1 << 17) /* 1: multi block trans, 0: single */
#define CMD_SET_CMD12EN		(1 << 16) /* 1: CMD12 auto issue */
#define CMD_SET_RIDXC_INDEX	((0 << 15) | (0 << 14)) /* index check */
#define CMD_SET_RIDXC_BITS	((0 << 15) | (1 << 14)) /* check bits check */
#define CMD_SET_RIDXC_NO	((1 << 15) | (0 << 14)) /* no check */
#define CMD_SET_CRC7C		((0 << 13) | (0 << 12)) /* CRC7 check */
#define CMD_SET_CRC7C_BITS	((0 << 13) | (1 << 12)) /* check bits check */
#define CMD_SET_CRC7C_INTERNAL	((1 << 13) | (0 << 12)) /* internal CRC7 check */
#define CMD_SET_CRC16C		(1 << 10) /* 0: CRC16 check */
#define CMD_SET_CRCSTE		(1 << 8) /* 1: not receive CRC status */
#define CMD_SET_TBIT		(1 << 7) /* 1: transmission bit "Low" */
#define CMD_SET_OPDM		(1 << 6) /* 1: open/drain */
#define CMD_SET_CCSH		(1 << 5)
#define CMD_SET_DARS		(1 << 2) /* Dual Data Rate */
#define CMD_SET_DATW_1		((0 << 1) | (0 << 0)) /* 1bit */
#define CMD_SET_DATW_4		((0 << 1) | (1 << 0)) /* 4bit */
#define CMD_SET_DATW_8		((1 << 1) | (0 << 0)) /* 8bit */

/* CE_CMD_CTRL */
#define CMD_CTRL_BREAK		(1 << 0)

/* CE_BLOCK_SET */
#define BLOCK_SIZE_MASK		0x0000ffff

/* CE_INT */
#define INT_CCSDE		(1 << 29)
#define INT_CMD12DRE		(1 << 26)
#define INT_CMD12RBE		(1 << 25)
#define INT_CMD12CRE		(1 << 24)
#define INT_DTRANE		(1 << 23)
#define INT_BUFRE		(1 << 22)
#define INT_BUFWEN		(1 << 21)
#define INT_BUFREN		(1 << 20)
#define INT_CCSRCV		(1 << 19)
#define INT_RBSYE		(1 << 17)
#define INT_CRSPE		(1 << 16)
#define INT_CMDVIO		(1 << 15)
#define INT_BUFVIO		(1 << 14)
#define INT_WDATERR		(1 << 11)
#define INT_RDATERR		(1 << 10)
#define INT_RIDXERR		(1 << 9)
#define INT_RSPERR		(1 << 8)
#define INT_CCSTO		(1 << 5)
#define INT_CRCSTO		(1 << 4)
#define INT_WDATTO		(1 << 3)
#define INT_RDATTO		(1 << 2)
#define INT_RBSYTO		(1 << 1)
#define INT_RSPTO		(1 << 0)
#define INT_ERR_STS		(INT_CMDVIO | INT_BUFVIO | INT_WDATERR |  \
				 INT_RDATERR | INT_RIDXERR | INT_RSPERR | \
				 INT_CCSTO | INT_CRCSTO | INT_WDATTO |	  \
				 INT_RDATTO | INT_RBSYTO | INT_RSPTO)

#define INT_ALL			(INT_RBSYE | INT_CRSPE | INT_BUFREN |	 \
				 INT_BUFWEN | INT_CMD12DRE | INT_BUFRE | \
				 INT_DTRANE | INT_CMD12RBE | INT_CMD12CRE)

#define INT_CCS			(INT_CCSTO | INT_CCSRCV | INT_CCSDE)

/* CE_INT_MASK */
#define MASK_ALL		0x00000000
#define MASK_MCCSDE		(1 << 29)
#define MASK_MCMD12DRE		(1 << 26)
#define MASK_MCMD12RBE		(1 << 25)
#define MASK_MCMD12CRE		(1 << 24)
#define MASK_MDTRANE		(1 << 23)
#define MASK_MBUFRE		(1 << 22)
#define MASK_MBUFWEN		(1 << 21)
#define MASK_MBUFREN		(1 << 20)
#define MASK_MCCSRCV		(1 << 19)
#define MASK_MRBSYE		(1 << 17)
#define MASK_MCRSPE		(1 << 16)
#define MASK_MCMDVIO		(1 << 15)
#define MASK_MBUFVIO		(1 << 14)
#define MASK_MWDATERR		(1 << 11)
#define MASK_MRDATERR		(1 << 10)
#define MASK_MRIDXERR		(1 << 9)
#define MASK_MRSPERR		(1 << 8)
#define MASK_MCCSTO		(1 << 5)
#define MASK_MCRCSTO		(1 << 4)
#define MASK_MWDATTO		(1 << 3)
#define MASK_MRDATTO		(1 << 2)
#define MASK_MRBSYTO		(1 << 1)
#define MASK_MRSPTO		(1 << 0)

#define MASK_START_CMD		(MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR | \
				 MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR | \
				 MASK_MCRCSTO | MASK_MWDATTO | \
				 MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO)

#define MASK_CLEAN		(INT_ERR_STS | MASK_MRBSYE | MASK_MCRSPE |	\
				 MASK_MBUFREN | MASK_MBUFWEN |			\
				 MASK_MCMD12DRE | MASK_MBUFRE | MASK_MDTRANE |	\
				 MASK_MCMD12RBE | MASK_MCMD12CRE)

/* CE_HOST_STS1 */
#define STS1_CMDSEQ		(1 << 31)

/* CE_HOST_STS2 */
#define STS2_CRCSTE		(1 << 31)
#define STS2_CRC16E		(1 << 30)
#define STS2_AC12CRCE		(1 << 29)
#define STS2_RSPCRC7E		(1 << 28)
#define STS2_CRCSTEBE		(1 << 27)
#define STS2_RDATEBE		(1 << 26)
#define STS2_AC12REBE		(1 << 25)
#define STS2_RSPEBE		(1 << 24)
#define STS2_AC12IDXE		(1 << 23)
#define STS2_RSPIDXE		(1 << 22)
#define STS2_CCSTO		(1 << 15)
#define STS2_RDATTO		(1 << 14)
#define STS2_DATBSYTO		(1 << 13)
#define STS2_CRCSTTO		(1 << 12)
#define STS2_AC12BSYTO		(1 << 11)
#define STS2_RSPBSYTO		(1 << 10)
#define STS2_AC12RSPTO		(1 << 9)
#define STS2_RSPTO		(1 << 8)
#define STS2_CRC_ERR		(STS2_CRCSTE | STS2_CRC16E |		\
				 STS2_AC12CRCE | STS2_RSPCRC7E | STS2_CRCSTEBE)
#define STS2_TIMEOUT_ERR	(STS2_CCSTO | STS2_RDATTO |		\
				 STS2_DATBSYTO | STS2_CRCSTTO |		\
				 STS2_AC12BSYTO | STS2_RSPBSYTO |	\
				 STS2_AC12RSPTO | STS2_RSPTO)

#define CLKDEV_EMMC_DATA	52000000 /* 52 MHz */
#define CLKDEV_MMC_DATA		20000000 /* 20 MHz */
#define CLKDEV_INIT		400000   /* 400 kHz */

enum sh_mmcif_state {
	STATE_IDLE,
	STATE_REQUEST,
	STATE_IOS,
	STATE_TIMEOUT,
};

enum sh_mmcif_wait_for {
	MMCIF_WAIT_FOR_REQUEST,
	MMCIF_WAIT_FOR_CMD,
	MMCIF_WAIT_FOR_MREAD,
	MMCIF_WAIT_FOR_MWRITE,
	MMCIF_WAIT_FOR_READ,
	MMCIF_WAIT_FOR_WRITE,
	MMCIF_WAIT_FOR_READ_END,
	MMCIF_WAIT_FOR_WRITE_END,
	MMCIF_WAIT_FOR_STOP,
};

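
/*
 * Rough mapping of the wait states above, as used by the handlers below:
 * CMD waits for the command response; READ/WRITE and MREAD/MWRITE wait for
 * single- resp. multi-block PIO buffer interrupts; READ_END/WRITE_END wait
 * for transfer completion; STOP waits for the (auto) CMD12 response.
 */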
/*
 * Per-SoC differences are handled via the feature flags below
 * (ccs_enable, clk_ctrl2_enable, clkdiv_map).
 */
struct sh_mmcif_host {
	struct mmc_host *mmc;
	struct mmc_request *mrq;
	struct platform_device *pd;
	struct clk *clk;
	int bus_width;
	unsigned char timing;
	bool sd_error;
	bool dying;
	long timeout;
	void __iomem *addr;
	u32 *pio_ptr;
	spinlock_t lock;		/* protect sh_mmcif_host::state */
	enum sh_mmcif_state state;
	enum sh_mmcif_wait_for wait_for;
	struct delayed_work timeout_work;
	size_t blocksize;
	int sg_idx;
	int sg_blkidx;
	bool power;
	bool ccs_enable;		/* Command Completion Signal support */
	bool clk_ctrl2_enable;
	struct mutex thread_lock;
	u32 clkdiv_map;			/* see CE_CLK_CTRL::CLKDIV */

	/* DMA support */
	struct dma_chan *chan_rx;
	struct dma_chan *chan_tx;
	struct completion dma_complete;
	bool dma_active;
};

static const struct of_device_id sh_mmcif_of_match[] = {
	{ .compatible = "renesas,sh-mmcif" },
	{ }
};
MODULE_DEVICE_TABLE(of, sh_mmcif_of_match);

#define sh_mmcif_host_to_dev(host) (&host->pd->dev)

static inline void sh_mmcif_bitset(struct sh_mmcif_host *host,
				   unsigned int reg, u32 val)
{
	writel(val | readl(host->addr + reg), host->addr + reg);
}

static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
				   unsigned int reg, u32 val)
{
	writel(~val & readl(host->addr + reg), host->addr + reg);
}
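
/*
 * Both helpers are plain (non-atomic) read-modify-write accessors. A typical
 * use throughout this driver is unmasking a single interrupt source, e.g.:
 *
 *	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
 */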

static void sh_mmcif_dma_complete(void *arg)
{
	struct sh_mmcif_host *host = arg;
	struct mmc_request *mrq = host->mrq;
	struct device *dev = sh_mmcif_host_to_dev(host);

	dev_dbg(dev, "Command completed\n");

	if (WARN(!mrq || !mrq->data, "%s: NULL data in DMA completion!\n",
		 dev_name(dev)))
		return;

	complete(&host->dma_complete);
}

static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	struct scatterlist *sg = data->sg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_rx;
	struct device *dev = sh_mmcif_host_to_dev(host);
	dma_cookie_t cookie = -EINVAL;
	int ret;

	ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
			 DMA_FROM_DEVICE);
	if (ret > 0) {
		host->dma_active = true;
		desc = dmaengine_prep_slave_sg(chan, sg, ret,
			DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		desc->callback = sh_mmcif_dma_complete;
		desc->callback_param = host;
		cookie = dmaengine_submit(desc);
		sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN);
		dma_async_issue_pending(chan);
	}
	dev_dbg(dev, "%s(): mapped %d -> %d, cookie %d\n",
		__func__, data->sg_len, ret, cookie);

	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_rx = NULL;
		host->dma_active = false;
		dma_release_channel(chan);
		/* Free the Tx channel too */
		chan = host->chan_tx;
		if (chan) {
			host->chan_tx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	}

	dev_dbg(dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
		desc, cookie, data->sg_len);
}

static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	struct scatterlist *sg = data->sg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_tx;
	struct device *dev = sh_mmcif_host_to_dev(host);
	dma_cookie_t cookie = -EINVAL;
	int ret;

	ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
			 DMA_TO_DEVICE);
	if (ret > 0) {
		host->dma_active = true;
		desc = dmaengine_prep_slave_sg(chan, sg, ret,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		desc->callback = sh_mmcif_dma_complete;
		desc->callback_param = host;
		cookie = dmaengine_submit(desc);
		sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN);
		dma_async_issue_pending(chan);
	}
	dev_dbg(dev, "%s(): mapped %d -> %d, cookie %d\n",
		__func__, data->sg_len, ret, cookie);

	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_tx = NULL;
		host->dma_active = false;
		dma_release_channel(chan);
		/* Free the Rx channel too */
		chan = host->chan_rx;
		if (chan) {
			host->chan_rx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	}

	dev_dbg(dev, "%s(): desc %p, cookie %d\n", __func__,
		desc, cookie);
}

static struct dma_chan *
sh_mmcif_request_dma_pdata(struct sh_mmcif_host *host, uintptr_t slave_id)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	/* slave_id is unsigned, so 0 means "no channel configured" */
	if (!slave_id)
		return NULL;

	return dma_request_channel(mask, shdma_chan_filter, (void *)slave_id);
}

static int sh_mmcif_dma_slave_config(struct sh_mmcif_host *host,
				     struct dma_chan *chan,
				     enum dma_transfer_direction direction)
{
	struct resource *res;
	struct dma_slave_config cfg = { 0, };

	res = platform_get_resource(host->pd, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	cfg.direction = direction;

	if (direction == DMA_DEV_TO_MEM) {
		cfg.src_addr = res->start + MMCIF_CE_DATA;
		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	} else {
		cfg.dst_addr = res->start + MMCIF_CE_DATA;
		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	}

	return dmaengine_slave_config(chan, &cfg);
}

static void sh_mmcif_request_dma(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);

	host->dma_active = false;

	/* We can only either use DMA for both Tx and Rx or not use it at all */
	if (IS_ENABLED(CONFIG_SUPERH) && dev->platform_data) {
		struct sh_mmcif_plat_data *pdata = dev->platform_data;

		host->chan_tx = sh_mmcif_request_dma_pdata(host,
							pdata->slave_id_tx);
		host->chan_rx = sh_mmcif_request_dma_pdata(host,
							pdata->slave_id_rx);
	} else {
		host->chan_tx = dma_request_chan(dev, "tx");
		if (IS_ERR(host->chan_tx))
			host->chan_tx = NULL;
		host->chan_rx = dma_request_chan(dev, "rx");
		if (IS_ERR(host->chan_rx))
			host->chan_rx = NULL;
	}
	dev_dbg(dev, "%s: got channel TX %p RX %p\n", __func__, host->chan_tx,
		host->chan_rx);

	if (!host->chan_tx || !host->chan_rx ||
	    sh_mmcif_dma_slave_config(host, host->chan_tx, DMA_MEM_TO_DEV) ||
	    sh_mmcif_dma_slave_config(host, host->chan_rx, DMA_DEV_TO_MEM))
		goto error;

	return;

error:
	if (host->chan_tx)
		dma_release_channel(host->chan_tx);
	if (host->chan_rx)
		dma_release_channel(host->chan_rx);
	host->chan_tx = host->chan_rx = NULL;
}

static void sh_mmcif_release_dma(struct sh_mmcif_host *host)
{
	sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);

	/* Descriptors are freed automatically */
	if (host->chan_tx) {
		struct dma_chan *chan = host->chan_tx;

		host->chan_tx = NULL;
		dma_release_channel(chan);
	}
	if (host->chan_rx) {
		struct dma_chan *chan = host->chan_rx;

		host->chan_rx = NULL;
		dma_release_channel(chan);
	}

	host->dma_active = false;
}

static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct sh_mmcif_plat_data *p = dev->platform_data;
	bool sup_pclk = p ? p->sup_pclk : false;
	unsigned int current_clk = clk_get_rate(host->clk);
	unsigned int clkdiv;

	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR);

	if (!clk)
		return;

	if (host->clkdiv_map) {
		unsigned int freq, best_freq, myclk, div, diff_min, diff;
		int i;

		clkdiv = 0;
		diff_min = ~0;
		best_freq = 0;
		for (i = 31; i >= 0; i--) {
			if (!((1 << i) & host->clkdiv_map))
				continue;

			/*
			 * clk = parent_freq / div
			 * -> parent_freq = clk x div
			 */

			div = 1 << (i + 1);
			freq = clk_round_rate(host->clk, clk * div);
			myclk = freq / div;
			diff = (myclk > clk) ? myclk - clk : clk - myclk;

			if (diff <= diff_min) {
				best_freq = freq;
				clkdiv = i;
				diff_min = diff;
			}
		}

		dev_dbg(dev, "clk %u/%u (%u, 0x%x)\n",
			(best_freq / (1 << (clkdiv + 1))), clk,
			best_freq, clkdiv);

		clk_set_rate(host->clk, best_freq);
		clkdiv = clkdiv << 16;
	} else if (sup_pclk && clk == current_clk) {
		clkdiv = CLK_SUP_PCLK;
	} else {
		clkdiv = (fls(DIV_ROUND_UP(current_clk, clk) - 1) - 1) << 16;
	}
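
	/*
	 * Illustrative note: a CLKDIV field value of n selects a divider of
	 * 2^(n + 1), matching "div = 1 << (i + 1)" above. E.g. with
	 * current_clk = 100 MHz and a requested clk = 25 MHz, the fallback
	 * branch computes DIV_ROUND_UP(100M, 25M) = 4, fls(3) - 1 = 1, so
	 * field value 1 is programmed and the bus runs at 100 MHz / 4.
	 */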

	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR & clkdiv);
	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
}

static void sh_mmcif_sync_reset(struct sh_mmcif_host *host)
{
	u32 tmp;

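	/*
	 * Presumably 0x010f0000 covers the CLK_ENABLE bit and the CLKDIV
	 * field of CE_CLK_CTRL, i.e. the current clock setting is carried
	 * across the soft reset below (assumption based on the CLK_* masks).
	 */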
	tmp = 0x010f0000 & sh_mmcif_readl(host->addr, MMCIF_CE_CLK_CTRL);

	sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_ON);
	sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_OFF);
	if (host->ccs_enable)
		tmp |= SCCSTO_29;
	if (host->clk_ctrl2_enable)
		sh_mmcif_writel(host->addr, MMCIF_CE_CLK_CTRL2, 0x0F0F0000);
	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp |
		SRSPTO_256 | SRBSYTO_29 | SRWDTO_29);
	/* byte swap on */
	sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP);
}

static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	u32 state1, state2;
	int ret, timeout;

	host->sd_error = false;

	state1 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1);
	state2 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS2);
	dev_dbg(dev, "ERR HOST_STS1 = %08x\n", state1);
	dev_dbg(dev, "ERR HOST_STS2 = %08x\n", state2);

	if (state1 & STS1_CMDSEQ) {
		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK);
		for (timeout = 10000; timeout; timeout--) {
			if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1)
			      & STS1_CMDSEQ))
				break;
			mdelay(1);
		}
		if (!timeout) {
			dev_err(dev,
				"Forced end of command sequence timeout err\n");
			return -EIO;
		}
		sh_mmcif_sync_reset(host);
		dev_dbg(dev, "Forced end of command sequence\n");
		return -EIO;
	}

	if (state2 & STS2_CRC_ERR) {
		dev_err(dev, " CRC error: state %u, wait %u\n",
			host->state, host->wait_for);
		ret = -EIO;
	} else if (state2 & STS2_TIMEOUT_ERR) {
		dev_err(dev, " Timeout: state %u, wait %u\n",
			host->state, host->wait_for);
		ret = -ETIMEDOUT;
	} else {
		dev_dbg(dev, " End/Index error: state %u, wait %u\n",
			host->state, host->wait_for);
		ret = -EIO;
	}
	return ret;
}

static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p)
{
	struct mmc_data *data = host->mrq->data;

	host->sg_blkidx += host->blocksize;

	/* data->sg->length must be a multiple of host->blocksize? */
	BUG_ON(host->sg_blkidx > data->sg->length);

	if (host->sg_blkidx == data->sg->length) {
		host->sg_blkidx = 0;
		if (++host->sg_idx < data->sg_len)
			host->pio_ptr = sg_virt(++data->sg);
	} else {
		host->pio_ptr = p;
	}

	return host->sg_idx != data->sg_len;
}
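
/*
 * Example (illustrative): with a 512-byte block size and a 4096-byte sg
 * entry, sg_blkidx advances by 512 per call and pio_ptr simply moves
 * forward; after the 8th block the entry is exhausted, sg_blkidx wraps to 0
 * and pio_ptr jumps to the next sg entry, until sg_idx reaches sg_len and
 * the function reports completion by returning false.
 */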

static void sh_mmcif_single_read(struct sh_mmcif_host *host,
				 struct mmc_request *mrq)
{
	host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
			   BLOCK_SIZE_MASK) + 3;
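
	/*
	 * The "+ 3" above makes the word loops in sh_mmcif_read_block() and
	 * sh_mmcif_write_block() round up: blocksize / 4 then also covers a
	 * block size that is not a multiple of 4 (our reading of the
	 * original code, not a documented hardware requirement).
	 */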

	host->wait_for = MMCIF_WAIT_FOR_READ;

	/* buf read enable */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
}

static bool sh_mmcif_read_block(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct mmc_data *data = host->mrq->data;
	u32 *p = sg_virt(data->sg);
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	for (i = 0; i < host->blocksize / 4; i++)
		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);

	/* buffer read end */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
	host->wait_for = MMCIF_WAIT_FOR_READ_END;

	return true;
}

static void sh_mmcif_multi_read(struct sh_mmcif_host *host,
				struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (!data->sg_len || !data->sg->length)
		return;

	host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
		BLOCK_SIZE_MASK;

	host->wait_for = MMCIF_WAIT_FOR_MREAD;
	host->sg_idx = 0;
	host->sg_blkidx = 0;
	host->pio_ptr = sg_virt(data->sg);

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
}

static bool sh_mmcif_mread_block(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct mmc_data *data = host->mrq->data;
	u32 *p = host->pio_ptr;
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	BUG_ON(!data->sg->length);

	for (i = 0; i < host->blocksize / 4; i++)
		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);

	if (!sh_mmcif_next_block(host, p))
		return false;

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);

	return true;
}

static void sh_mmcif_single_write(struct sh_mmcif_host *host,
				  struct mmc_request *mrq)
{
	host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
			   BLOCK_SIZE_MASK) + 3;

	host->wait_for = MMCIF_WAIT_FOR_WRITE;

	/* buf write enable */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
}

static bool sh_mmcif_write_block(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct mmc_data *data = host->mrq->data;
	u32 *p = sg_virt(data->sg);
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	for (i = 0; i < host->blocksize / 4; i++)
		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);

	/* buffer write end */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
	host->wait_for = MMCIF_WAIT_FOR_WRITE_END;

	return true;
}

static void sh_mmcif_multi_write(struct sh_mmcif_host *host,
				 struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (!data->sg_len || !data->sg->length)
		return;

	host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
		BLOCK_SIZE_MASK;

	host->wait_for = MMCIF_WAIT_FOR_MWRITE;
	host->sg_idx = 0;
	host->sg_blkidx = 0;
	host->pio_ptr = sg_virt(data->sg);

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
}

static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct mmc_data *data = host->mrq->data;
	u32 *p = host->pio_ptr;
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	BUG_ON(!data->sg->length);

	for (i = 0; i < host->blocksize / 4; i++)
		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);

	if (!sh_mmcif_next_block(host, p))
		return false;

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);

	return true;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) static void sh_mmcif_get_response(struct sh_mmcif_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) struct mmc_command *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) if (cmd->flags & MMC_RSP_136) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) cmd->resp[1] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) cmd->resp[2] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) cmd->resp[3] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
	} else {
		cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
	}
}
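/*
 * Note the reversed register order above: for 136-bit (R2) responses
 * the MMC core expects resp[0] to hold the most significant word of
 * the response, so RESP3 is read first.
 */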
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) struct mmc_command *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP_CMD12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) struct mmc_request *mrq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) struct device *dev = sh_mmcif_host_to_dev(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) struct mmc_data *data = mrq->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) struct mmc_command *cmd = mrq->cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) u32 opc = cmd->opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) u32 tmp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) /* Response Type check */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) switch (mmc_resp_type(cmd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) case MMC_RSP_NONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) tmp |= CMD_SET_RTYP_NO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) case MMC_RSP_R1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) case MMC_RSP_R3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) tmp |= CMD_SET_RTYP_6B;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) case MMC_RSP_R1B:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) tmp |= CMD_SET_RBSY | CMD_SET_RTYP_6B;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) case MMC_RSP_R2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) tmp |= CMD_SET_RTYP_17B;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) dev_err(dev, "Unsupported response type.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) /* WDAT / DATW */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) if (data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) tmp |= CMD_SET_WDAT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) switch (host->bus_width) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) case MMC_BUS_WIDTH_1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) tmp |= CMD_SET_DATW_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) case MMC_BUS_WIDTH_4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) tmp |= CMD_SET_DATW_4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) case MMC_BUS_WIDTH_8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) tmp |= CMD_SET_DATW_8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) dev_err(dev, "Unsupported bus width.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) switch (host->timing) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) case MMC_TIMING_MMC_DDR52:
			/*
			 * The MMC core will only set this timing if the host
			 * advertises the MMC_CAP_1_8V_DDR/MMC_CAP_1_2V_DDR
			 * capability. MMCIF implementations with this
			 * capability, e.g. sh73a0, have to set it in their
			 * platform data.
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) tmp |= CMD_SET_DARS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) /* DWEN */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) if (opc == MMC_WRITE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) tmp |= CMD_SET_DWEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) /* CMLTE/CMD12EN */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) if (opc == MMC_READ_MULTIPLE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) data->blocks << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) /* RIDXC[1:0] check bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) tmp |= CMD_SET_RIDXC_BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) /* RCRC7C[1:0] check bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) if (opc == MMC_SEND_OP_COND)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) tmp |= CMD_SET_CRC7C_BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) /* RCRC7C[1:0] internal CRC7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) if (opc == MMC_ALL_SEND_CID ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) tmp |= CMD_SET_CRC7C_INTERNAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) return (opc << 24) | tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) }
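/*
 * A worked example of the command word built above (all values symbolic,
 * nothing beyond the flags this function sets): CMD18, i.e.
 * MMC_READ_MULTIPLE_BLOCK with an R1 response on a 4-bit SDR bus,
 * comes out as
 *
 *	(MMC_READ_MULTIPLE_BLOCK << 24) | CMD_SET_RTYP_6B | CMD_SET_WDAT |
 *		CMD_SET_DATW_4 | CMD_SET_CMLTE | CMD_SET_CMD12EN
 *
 * with the block count latched into MMCIF_CE_BLOCK_SET[31:16] as a side
 * effect; DWEN stays clear because CMD18 is a read.
 */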
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) struct mmc_request *mrq, u32 opc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) struct device *dev = sh_mmcif_host_to_dev(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) switch (opc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) case MMC_READ_MULTIPLE_BLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) sh_mmcif_multi_read(host, mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) case MMC_WRITE_MULTIPLE_BLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) sh_mmcif_multi_write(host, mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) case MMC_WRITE_BLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) sh_mmcif_single_write(host, mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) case MMC_READ_SINGLE_BLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) case MMC_SEND_EXT_CSD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) sh_mmcif_single_read(host, mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) dev_err(dev, "Unsupported CMD%d\n", opc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) struct mmc_request *mrq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) struct mmc_command *cmd = mrq->cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) u32 opc;
	u32 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) if (cmd->flags & MMC_RSP_BUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) mask = MASK_START_CMD | MASK_MRBSYE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) mask = MASK_START_CMD | MASK_MCRSPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) if (host->ccs_enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) mask |= MASK_MCCSTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) if (mrq->data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) mrq->data->blksz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) opc = sh_mmcif_set_cmd(host, mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) if (host->ccs_enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0 | INT_CCS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) /* set arg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) sh_mmcif_writel(host->addr, MMCIF_CE_ARG, cmd->arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) /* set cmd */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) spin_lock_irqsave(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) host->wait_for = MMCIF_WAIT_FOR_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) schedule_delayed_work(&host->timeout_work, host->timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) spin_unlock_irqrestore(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) }
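/*
 * Note for the function above: completion of the command stage is
 * signalled by different interrupts depending on the response type -
 * MASK_MRBSYE (busy release) for R1b commands, MASK_MCRSPE (response
 * end) for everything else.  CMD_SET is written and the timeout work
 * armed under host->lock, so the timeout handler never sees a
 * half-started command.
 */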
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) struct mmc_request *mrq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) struct device *dev = sh_mmcif_host_to_dev(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) switch (mrq->cmd->opcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) case MMC_READ_MULTIPLE_BLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) case MMC_WRITE_MULTIPLE_BLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) dev_err(dev, "unsupported stop cmd\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) mrq->stop->error = sh_mmcif_error_manage(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) host->wait_for = MMCIF_WAIT_FOR_STOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) }
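/*
 * No command is sent by sh_mmcif_stop_cmd() above: CMD_SET_CMD12EN,
 * set for multi-block transfers in sh_mmcif_set_cmd(), is what makes
 * the controller issue CMD12 by itself.  The stop stage merely unmasks
 * the matching completion interrupt (MASK_MCMD12DRE after a read,
 * MASK_MCMD12RBE after a write) and parks the state machine in
 * MMCIF_WAIT_FOR_STOP until it fires.
 */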
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) struct sh_mmcif_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) struct device *dev = sh_mmcif_host_to_dev(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) spin_lock_irqsave(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) if (host->state != STATE_IDLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) dev_dbg(dev, "%s() rejected, state %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) __func__, host->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) spin_unlock_irqrestore(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) mrq->cmd->error = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) mmc_request_done(mmc, mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) host->state = STATE_REQUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) spin_unlock_irqrestore(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) host->mrq = mrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) sh_mmcif_start_cmd(host, mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) static void sh_mmcif_clk_setup(struct sh_mmcif_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) struct device *dev = sh_mmcif_host_to_dev(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) if (host->mmc->f_max) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) unsigned int f_max, f_min = 0, f_min_old;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) f_max = host->mmc->f_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) for (f_min_old = f_max; f_min_old > 2;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) f_min = clk_round_rate(host->clk, f_min_old / 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) if (f_min == f_min_old)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) f_min_old = f_min;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
		/*
		 * This driver assumes this SoC is R-Car Gen2 or later, where
		 * all ten power-of-two clock divisors, from 1/2 down to
		 * 1/1024, are available - hence the fully populated
		 * divisor map.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) host->clkdiv_map = 0x3ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) host->mmc->f_max = f_max / (1 << ffs(host->clkdiv_map));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) host->mmc->f_min = f_min / (1 << fls(host->clkdiv_map));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) unsigned int clk = clk_get_rate(host->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) host->mmc->f_max = clk / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) host->mmc->f_min = clk / 512;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) dev_dbg(dev, "clk max/min = %d/%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) host->mmc->f_max, host->mmc->f_min);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) }
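/*
 * Worked example for the divisor arithmetic above, assuming the full
 * R-Car Gen2+ divisor map of 0x3ff: ffs(0x3ff) == 1 and
 * fls(0x3ff) == 10, so the advertised ceiling is f_max / (1 << 1) and
 * the floor is the rounded f_min / (1 << 10), e.g. a 200 MHz f_max is
 * published as 100 MHz.
 */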
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) struct sh_mmcif_host *host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) struct device *dev = sh_mmcif_host_to_dev(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) spin_lock_irqsave(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) if (host->state != STATE_IDLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) dev_dbg(dev, "%s() rejected, state %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) __func__, host->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) spin_unlock_irqrestore(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) host->state = STATE_IOS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) spin_unlock_irqrestore(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) switch (ios->power_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) case MMC_POWER_UP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) if (!IS_ERR(mmc->supply.vmmc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) if (!host->power) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) clk_prepare_enable(host->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) pm_runtime_get_sync(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) sh_mmcif_sync_reset(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) sh_mmcif_request_dma(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) host->power = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) case MMC_POWER_OFF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) if (!IS_ERR(mmc->supply.vmmc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) if (host->power) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) sh_mmcif_clock_control(host, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) sh_mmcif_release_dma(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) pm_runtime_put(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) clk_disable_unprepare(host->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) host->power = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) case MMC_POWER_ON:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) sh_mmcif_clock_control(host, ios->clock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) host->timing = ios->timing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) host->bus_width = ios->bus_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) host->state = STATE_IDLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) static const struct mmc_host_ops sh_mmcif_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) .request = sh_mmcif_request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) .set_ios = sh_mmcif_set_ios,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) .get_cd = mmc_gpio_get_cd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) struct mmc_command *cmd = host->mrq->cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) struct mmc_data *data = host->mrq->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) struct device *dev = sh_mmcif_host_to_dev(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) long time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) if (host->sd_error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) switch (cmd->opcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) case MMC_ALL_SEND_CID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) case MMC_SELECT_CARD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) case MMC_APP_CMD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) cmd->error = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) cmd->error = sh_mmcif_error_manage(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) dev_dbg(dev, "CMD%d error %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) cmd->opcode, cmd->error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) host->sd_error = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) if (!(cmd->flags & MMC_RSP_PRESENT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) cmd->error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) sh_mmcif_get_response(host, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) if (!data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
	/*
	 * Completion can be signalled from the DMA callback and from the
	 * error IRQ, so the completion has to be reset here, before setting
	 * .dma_active
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) init_completion(&host->dma_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) if (data->flags & MMC_DATA_READ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) if (host->chan_rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) sh_mmcif_start_dma_rx(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) if (host->chan_tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) sh_mmcif_start_dma_tx(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) if (!host->dma_active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) return !data->error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) /* Running in the IRQ thread, can sleep */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) time = wait_for_completion_interruptible_timeout(&host->dma_complete,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) host->timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) if (data->flags & MMC_DATA_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) dma_unmap_sg(host->chan_rx->device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) data->sg, data->sg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) dma_unmap_sg(host->chan_tx->device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) data->sg, data->sg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) if (host->sd_error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) dev_err(host->mmc->parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) "Error IRQ while waiting for DMA completion!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) /* Woken up by an error IRQ: abort DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) data->error = sh_mmcif_error_manage(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) } else if (!time) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) dev_err(host->mmc->parent, "DMA timeout!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) data->error = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) } else if (time < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) dev_err(host->mmc->parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) "wait_for_completion_...() error %ld!\n", time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) data->error = time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) host->dma_active = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) if (data->error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) data->bytes_xfered = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) /* Abort DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) if (data->flags & MMC_DATA_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) dmaengine_terminate_all(host->chan_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) dmaengine_terminate_all(host->chan_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) struct sh_mmcif_host *host = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) struct mmc_request *mrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) struct device *dev = sh_mmcif_host_to_dev(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) bool wait = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) int wait_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) spin_lock_irqsave(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) wait_work = host->wait_for;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) spin_unlock_irqrestore(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) cancel_delayed_work_sync(&host->timeout_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) mutex_lock(&host->thread_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) mrq = host->mrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) if (!mrq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) dev_dbg(dev, "IRQ thread state %u, wait %u: NULL mrq!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) host->state, host->wait_for);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) mutex_unlock(&host->thread_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)
	/*
	 * All handlers return true if processing continues, and false if the
	 * request has to be completed - successfully or not
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) switch (wait_work) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) case MMCIF_WAIT_FOR_REQUEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) /* We're too late, the timeout has already kicked in */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) mutex_unlock(&host->thread_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) case MMCIF_WAIT_FOR_CMD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) /* Wait for data? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) wait = sh_mmcif_end_cmd(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) case MMCIF_WAIT_FOR_MREAD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) /* Wait for more data? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) wait = sh_mmcif_mread_block(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) case MMCIF_WAIT_FOR_READ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) /* Wait for data end? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) wait = sh_mmcif_read_block(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) case MMCIF_WAIT_FOR_MWRITE:
		/* Wait for more data to write? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) wait = sh_mmcif_mwrite_block(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) case MMCIF_WAIT_FOR_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) /* Wait for data end? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) wait = sh_mmcif_write_block(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) case MMCIF_WAIT_FOR_STOP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) if (host->sd_error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) mrq->stop->error = sh_mmcif_error_manage(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) dev_dbg(dev, "%s(): %d\n", __func__, mrq->stop->error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) sh_mmcif_get_cmd12response(host, mrq->stop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) mrq->stop->error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) case MMCIF_WAIT_FOR_READ_END:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) case MMCIF_WAIT_FOR_WRITE_END:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) if (host->sd_error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) mrq->data->error = sh_mmcif_error_manage(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) dev_dbg(dev, "%s(): %d\n", __func__, mrq->data->error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) if (wait) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) schedule_delayed_work(&host->timeout_work, host->timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) /* Wait for more data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) mutex_unlock(&host->thread_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) if (host->wait_for != MMCIF_WAIT_FOR_STOP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) struct mmc_data *data = mrq->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) if (!mrq->cmd->error && data && !data->error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) data->bytes_xfered =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) data->blocks * data->blksz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) if (mrq->stop && !mrq->cmd->error && (!data || !data->error)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) sh_mmcif_stop_cmd(host, mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) if (!mrq->stop->error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) schedule_delayed_work(&host->timeout_work, host->timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) mutex_unlock(&host->thread_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) host->wait_for = MMCIF_WAIT_FOR_REQUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) host->state = STATE_IDLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) host->mrq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) mmc_request_done(host->mmc, mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) mutex_unlock(&host->thread_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) struct sh_mmcif_host *host = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) struct device *dev = sh_mmcif_host_to_dev(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) u32 state, mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) state = sh_mmcif_readl(host->addr, MMCIF_CE_INT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) mask = sh_mmcif_readl(host->addr, MMCIF_CE_INT_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) if (host->ccs_enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~(state & mask));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) sh_mmcif_writel(host->addr, MMCIF_CE_INT, INT_CCS | ~(state & mask));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state & MASK_CLEAN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) if (state & ~MASK_CLEAN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) dev_dbg(dev, "IRQ state = 0x%08x incompletely cleared\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) if (state & INT_ERR_STS || state & ~INT_ALL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) host->sd_error = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) dev_dbg(dev, "int err state = 0x%08x\n", state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) if (!host->mrq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) dev_dbg(dev, "NULL IRQ state = 0x%08x\n", state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) if (!host->dma_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) return IRQ_WAKE_THREAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) else if (host->sd_error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) sh_mmcif_dma_complete(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) dev_dbg(dev, "Unexpected IRQ 0x%x\n", state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) }
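/*
 * The MMCIF_CE_INT write in the handler above matches write-0-to-clear
 * register semantics: ~(state & mask) writes 0 exactly to the bits that
 * are both asserted and unmasked, clearing them, and 1 everywhere else,
 * leaving those bits untouched.  ORing in INT_CCS when CCS handling is
 * disabled therefore makes sure the CCS bit is never cleared here.
 */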
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) static void sh_mmcif_timeout_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) struct delayed_work *d = to_delayed_work(work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) struct mmc_request *mrq = host->mrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) struct device *dev = sh_mmcif_host_to_dev(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
	if (host->dying) {
		/* Don't run after mmc_remove_host() */
		return;
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) spin_lock_irqsave(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) if (host->state == STATE_IDLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) spin_unlock_irqrestore(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) dev_err(dev, "Timeout waiting for %u on CMD%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) host->wait_for, mrq->cmd->opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) host->state = STATE_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) spin_unlock_irqrestore(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) * Handle races with cancel_delayed_work(), unless
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) * cancel_delayed_work_sync() is used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) switch (host->wait_for) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) case MMCIF_WAIT_FOR_CMD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) mrq->cmd->error = sh_mmcif_error_manage(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) case MMCIF_WAIT_FOR_STOP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) mrq->stop->error = sh_mmcif_error_manage(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) case MMCIF_WAIT_FOR_MREAD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) case MMCIF_WAIT_FOR_MWRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) case MMCIF_WAIT_FOR_READ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) case MMCIF_WAIT_FOR_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) case MMCIF_WAIT_FOR_READ_END:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) case MMCIF_WAIT_FOR_WRITE_END:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) mrq->data->error = sh_mmcif_error_manage(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) host->state = STATE_IDLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) host->wait_for = MMCIF_WAIT_FOR_REQUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) host->mrq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) mmc_request_done(host->mmc, mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) static void sh_mmcif_init_ocr(struct sh_mmcif_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) struct device *dev = sh_mmcif_host_to_dev(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) struct sh_mmcif_plat_data *pd = dev->platform_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) struct mmc_host *mmc = host->mmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) mmc_regulator_get_supply(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) if (!pd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) if (!mmc->ocr_avail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) mmc->ocr_avail = pd->ocr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) else if (pd->ocr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) }
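/*
 * A minimal sketch of board platform data feeding the hooks above (the
 * values are illustrative only, not taken from any real board file):
 *
 *	static struct sh_mmcif_plat_data mmcif0_pdata = {
 *		.caps	= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE,
 *		.ocr	= MMC_VDD_32_33 | MMC_VDD_33_34,
 *	};
 *
 * With .ocr left at zero the OCR mask comes entirely from the vmmc
 * regulator looked up by mmc_regulator_get_supply().
 */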
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) static int sh_mmcif_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) int ret = 0, irq[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) struct mmc_host *mmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) struct sh_mmcif_host *host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) struct sh_mmcif_plat_data *pd = dev->platform_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) void __iomem *reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) const char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) irq[0] = platform_get_irq(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) irq[1] = platform_get_irq_optional(pdev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) if (irq[0] < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) reg = devm_platform_ioremap_resource(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) if (IS_ERR(reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) return PTR_ERR(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) if (!mmc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) ret = mmc_of_parse(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) goto err_host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) host = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) host->mmc = mmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) host->addr = reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) host->timeout = msecs_to_jiffies(10000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) host->ccs_enable = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) host->clk_ctrl2_enable = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) host->pd = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) spin_lock_init(&host->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) mmc->ops = &sh_mmcif_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) sh_mmcif_init_ocr(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_WAIT_WHILE_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) mmc->caps2 |= MMC_CAP2_NO_SD | MMC_CAP2_NO_SDIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) mmc->max_busy_timeout = 10000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) if (pd && pd->caps)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) mmc->caps |= pd->caps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) mmc->max_segs = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) mmc->max_blk_size = 512;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) mmc->max_req_size = PAGE_SIZE * mmc->max_segs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) mmc->max_seg_size = mmc->max_req_size;
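	/*
	 * With a 4 KiB PAGE_SIZE (an assumption - boards using this IP
	 * typically run 4 KiB pages) the limits above work out to a
	 * 32 * 4096 = 128 KiB request cap and 131072 / 512 = 256 blocks
	 * per request.
	 */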
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) platform_set_drvdata(pdev, host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) host->clk = devm_clk_get(dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) if (IS_ERR(host->clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) ret = PTR_ERR(host->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) dev_err(dev, "cannot get clock: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) goto err_host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) ret = clk_prepare_enable(host->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) goto err_host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) sh_mmcif_clk_setup(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) pm_runtime_enable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) host->power = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) ret = pm_runtime_get_sync(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) goto err_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) INIT_DELAYED_WORK(&host->timeout_work, sh_mmcif_timeout_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) sh_mmcif_sync_reset(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) name = irq[1] < 0 ? dev_name(dev) : "sh_mmc:error";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) ret = devm_request_threaded_irq(dev, irq[0], sh_mmcif_intr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) sh_mmcif_irqt, 0, name, host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) dev_err(dev, "request_irq error (%s)\n", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) goto err_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) if (irq[1] >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) ret = devm_request_threaded_irq(dev, irq[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) sh_mmcif_intr, sh_mmcif_irqt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 0, "sh_mmc:int", host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) dev_err(dev, "request_irq error (sh_mmc:int)\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) goto err_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) mutex_init(&host->thread_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) ret = mmc_add_host(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) goto err_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) dev_pm_qos_expose_latency_limit(dev, 100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492)
dev_info(dev, "Chip version 0x%04x, clock rate %lu MHz\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0xffff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) clk_get_rate(host->clk) / 1000000UL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) pm_runtime_put(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) clk_disable_unprepare(host->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) err_clk:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) clk_disable_unprepare(host->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) pm_runtime_put_sync(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) pm_runtime_disable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) err_host:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) mmc_free_host(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) static int sh_mmcif_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) struct sh_mmcif_host *host = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) host->dying = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) clk_prepare_enable(host->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) pm_runtime_get_sync(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) dev_pm_qos_hide_latency_limit(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) mmc_remove_host(host->mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) * FIXME: cancel_delayed_work(_sync)() and free_irq() race with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) * mmc_remove_host() call above. But swapping order doesn't help either
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) * (a query on the linux-mmc mailing list didn't bring any replies).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) cancel_delayed_work_sync(&host->timeout_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) clk_disable_unprepare(host->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) mmc_free_host(host->mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) pm_runtime_put_sync(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) pm_runtime_disable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) #ifdef CONFIG_PM_SLEEP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) static int sh_mmcif_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) struct sh_mmcif_host *host = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) pm_runtime_get_sync(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) pm_runtime_put(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) static int sh_mmcif_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) static const struct dev_pm_ops sh_mmcif_dev_pm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) SET_SYSTEM_SLEEP_PM_OPS(sh_mmcif_suspend, sh_mmcif_resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) static struct platform_driver sh_mmcif_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) .probe = sh_mmcif_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) .remove = sh_mmcif_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) .name = DRIVER_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) .probe_type = PROBE_PREFER_ASYNCHRONOUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) .pm = &sh_mmcif_dev_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) .of_match_table = sh_mmcif_of_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) module_platform_driver(sh_mmcif_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) MODULE_LICENSE("GPL v2");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) MODULE_ALIAS("platform:" DRIVER_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) MODULE_AUTHOR("Yusuke Goda <yusuke.goda.sx@renesas.com>");