// SPDX-License-Identifier: GPL-2.0-only
/*
 *  WM8505/WM8650 SD/MMC Host Controller
 *
 *  Copyright (C) 2010 Tony Prisk
 *  Copyright (C) 2008 WonderMedia Technologies, Inc.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/interrupt.h>

#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_device.h>

#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <asm/byteorder.h>


#define DRIVER_NAME			"wmt-sdhc"


/* MMC/SD controller registers */
#define SDMMC_CTLR			0x00
#define SDMMC_CMD			0x01
#define SDMMC_RSPTYPE			0x02
#define SDMMC_ARG			0x04
#define SDMMC_BUSMODE			0x08
#define SDMMC_BLKLEN			0x0C
#define SDMMC_BLKCNT			0x0E
#define SDMMC_RSP			0x10
#define SDMMC_CBCR			0x20
#define SDMMC_INTMASK0			0x24
#define SDMMC_INTMASK1			0x25
#define SDMMC_STS0			0x28
#define SDMMC_STS1			0x29
#define SDMMC_STS2			0x2A
#define SDMMC_STS3			0x2B
#define SDMMC_RSPTIMEOUT		0x2C
#define SDMMC_CLK			0x30	/* VT8500 only */
#define SDMMC_EXTCTRL			0x34
#define SDMMC_SBLKLEN			0x38
#define SDMMC_DMATIMEOUT		0x3C


/* SDMMC_CTLR bit fields */
#define CTLR_CMD_START			0x01
#define CTLR_CMD_WRITE			0x04
#define CTLR_FIFO_RESET			0x08

/* SDMMC_BUSMODE bit fields */
#define BM_SPI_MODE			0x01
#define BM_FOURBIT_MODE			0x02
#define BM_EIGHTBIT_MODE		0x04
#define BM_SD_OFF			0x10
#define BM_SPI_CS			0x20
#define BM_SD_POWER			0x40
#define BM_SOFT_RESET			0x80

/* SDMMC_BLKLEN bit fields */
#define BLKL_CRCERR_ABORT		0x0800
#define BLKL_CD_POL_HIGH		0x1000
#define BLKL_GPI_CD			0x2000
#define BLKL_DATA3_CD			0x4000
#define BLKL_INT_ENABLE			0x8000

/* SDMMC_INTMASK0 bit fields */
#define INT0_MBLK_TRAN_DONE_INT_EN	0x10
#define INT0_BLK_TRAN_DONE_INT_EN	0x20
#define INT0_CD_INT_EN			0x40
#define INT0_DI_INT_EN			0x80

/* SDMMC_INTMASK1 bit fields */
#define INT1_CMD_RES_TRAN_DONE_INT_EN	0x02
#define INT1_CMD_RES_TOUT_INT_EN	0x04
#define INT1_MBLK_AUTO_STOP_INT_EN	0x08
#define INT1_DATA_TOUT_INT_EN		0x10
#define INT1_RESCRC_ERR_INT_EN		0x20
#define INT1_RCRC_ERR_INT_EN		0x40
#define INT1_WCRC_ERR_INT_EN		0x80

/* SDMMC_STS0 bit fields */
#define STS0_WRITE_PROTECT		0x02
#define STS0_CD_DATA3			0x04
#define STS0_CD_GPI			0x08
#define STS0_MBLK_DONE			0x10
#define STS0_BLK_DONE			0x20
#define STS0_CARD_DETECT		0x40
#define STS0_DEVICE_INS			0x80

/* SDMMC_STS1 bit fields */
#define STS1_SDIO_INT			0x01
#define STS1_CMDRSP_DONE		0x02
#define STS1_RSP_TIMEOUT		0x04
#define STS1_AUTOSTOP_DONE		0x08
#define STS1_DATA_TIMEOUT		0x10
#define STS1_RSP_CRC_ERR		0x20
#define STS1_RCRC_ERR			0x40
#define STS1_WCRC_ERR			0x80

/* SDMMC_STS2 bit fields */
#define STS2_CMD_RES_BUSY		0x10
#define STS2_DATARSP_BUSY		0x20
#define STS2_DIS_FORCECLK		0x80

/* SDMMC_EXTCTRL bit fields */
#define EXT_EIGHTBIT			0x04

/* MMC/SD DMA Controller Registers */
#define SDDMA_GCR			0x100
#define SDDMA_IER			0x104
#define SDDMA_ISR			0x108
#define SDDMA_DESPR			0x10C
#define SDDMA_RBR			0x110
#define SDDMA_DAR			0x114
#define SDDMA_BAR			0x118
#define SDDMA_CPR			0x11C
#define SDDMA_CCR			0x120


/* SDDMA_GCR bit fields */
#define DMA_GCR_DMA_EN			0x00000001
#define DMA_GCR_SOFT_RESET		0x00000100

/* SDDMA_IER bit fields */
#define DMA_IER_INT_EN			0x00000001

/* SDDMA_ISR bit fields */
#define DMA_ISR_INT_STS			0x00000001

/* SDDMA_RBR bit fields */
#define DMA_RBR_FORMAT			0x40000000
#define DMA_RBR_END			0x80000000

/* SDDMA_CCR bit fields */
#define DMA_CCR_RUN			0x00000080
#define DMA_CCR_IF_TO_PERIPHERAL	0x00000000
#define DMA_CCR_PERIPHERAL_TO_IF	0x00400000

/* SDDMA_CCR event status */
#define DMA_CCR_EVT_NO_STATUS		0x00000000
#define DMA_CCR_EVT_UNDERRUN		0x00000001
#define DMA_CCR_EVT_OVERRUN		0x00000002
#define DMA_CCR_EVT_DESP_READ		0x00000003
#define DMA_CCR_EVT_DATA_RW		0x00000004
#define DMA_CCR_EVT_EARLY_END		0x00000005
#define DMA_CCR_EVT_SUCCESS		0x0000000F

#define PDMA_READ			0x00
#define PDMA_WRITE			0x01

#define WMT_SD_POWER_OFF		0
#define WMT_SD_POWER_ON			1

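/*
 * In-memory layout of one PDMA descriptor as built by
 * wmt_dma_init_descriptor(): the low 16 bits of 'flags' hold the
 * request byte count, bit 30 is always set (matching DMA_RBR_FORMAT
 * above) and bit 31 marks the last descriptor in the chain (matching
 * DMA_RBR_END).
 */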
struct wmt_dma_descriptor {
	u32 flags;
	u32 data_buffer_addr;
	u32 branch_addr;
	u32 reserved1;
};

struct wmt_mci_caps {
	unsigned int f_min;
	unsigned int f_max;
	u32 ocr_avail;
	u32 caps;
	u32 max_seg_size;
	u32 max_segs;
	u32 max_blk_size;
};

struct wmt_mci_priv {
	struct mmc_host *mmc;
	void __iomem *sdmmc_base;

	int irq_regular;
	int irq_dma;

	void *dma_desc_buffer;
	dma_addr_t dma_desc_device_addr;

	struct completion cmdcomp;
	struct completion datacomp;

	struct completion *comp_cmd;
	struct completion *comp_dma;

	struct mmc_request *req;
	struct mmc_command *cmd;

	struct clk *clk_sdmmc;
	struct device *dev;

	u8 power_inverted;
	u8 cd_inverted;
};

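/*
 * Setting BM_SD_OFF turns the card interface off.  Boards that wire
 * the SD power enable the other way around carry the "sdon-inverted"
 * DT property, which flips the sense here via priv->power_inverted.
 */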
static void wmt_set_sd_power(struct wmt_mci_priv *priv, int enable)
{
	u32 reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);

	if (enable ^ priv->power_inverted)
		reg_tmp &= ~BM_SD_OFF;
	else
		reg_tmp |= BM_SD_OFF;

	writeb(reg_tmp, priv->sdmmc_base + SDMMC_BUSMODE);
}

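/*
 * Assemble cmd->resp[] from the controller's 16-byte response FIFO at
 * SDMMC_RSP.  Bytes are read back one at a time; the final byte is
 * taken from offset 0 rather than 16, so the loop special-cases its
 * last iteration.
 */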
static void wmt_mci_read_response(struct mmc_host *mmc)
{
	struct wmt_mci_priv *priv;
	int idx1, idx2;
	u8 tmp_resp;
	u32 response;

	priv = mmc_priv(mmc);

	for (idx1 = 0; idx1 < 4; idx1++) {
		response = 0;
		for (idx2 = 0; idx2 < 4; idx2++) {
			if ((idx1 == 3) && (idx2 == 3))
				tmp_resp = readb(priv->sdmmc_base + SDMMC_RSP);
			else
				tmp_resp = readb(priv->sdmmc_base + SDMMC_RSP +
						 (idx1 * 4) + idx2 + 1);
			response |= (tmp_resp << (idx2 * 8));
		}
		priv->cmd->resp[idx1] = cpu_to_be32(response);
	}
}

static void wmt_mci_start_command(struct wmt_mci_priv *priv)
{
	u32 reg_tmp;

	reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR);
	writeb(reg_tmp | CTLR_CMD_START, priv->sdmmc_base + SDMMC_CTLR);
}

static int wmt_mci_send_command(struct mmc_host *mmc, u8 command, u8 cmdtype,
				u32 arg, u8 rsptype)
{
	struct wmt_mci_priv *priv;
	u32 reg_tmp;

	priv = mmc_priv(mmc);

	/* write command, arg, resptype registers */
	writeb(command, priv->sdmmc_base + SDMMC_CMD);
	writel(arg, priv->sdmmc_base + SDMMC_ARG);
	writeb(rsptype, priv->sdmmc_base + SDMMC_RSPTYPE);

	/* reset response FIFO */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR);
	writeb(reg_tmp | CTLR_FIFO_RESET, priv->sdmmc_base + SDMMC_CTLR);

	/* ensure clock enabled - VT3465 */
	wmt_set_sd_power(priv, WMT_SD_POWER_ON);

	/* clear status bits */
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS0);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS1);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS2);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS3);

	/* set command type */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR);
	writeb((reg_tmp & 0x0F) | (cmdtype << 4),
	       priv->sdmmc_base + SDMMC_CTLR);

	return 0;
}

static void wmt_mci_disable_dma(struct wmt_mci_priv *priv)
{
	writel(DMA_ISR_INT_STS, priv->sdmmc_base + SDDMA_ISR);
	writel(0, priv->sdmmc_base + SDDMA_IER);
}

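/*
 * Finish a data request once both the command/response and the DMA
 * halves are done: account the transferred bytes, unmap the
 * scatterlist and either complete the request or, for multi-block
 * transfers, issue the stop command and let the regular ISR finish it.
 */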
static void wmt_complete_data_request(struct wmt_mci_priv *priv)
{
	struct mmc_request *req;
	req = priv->req;

	req->data->bytes_xfered = req->data->blksz * req->data->blocks;

	/* unmap the DMA pages used for write data */
	if (req->data->flags & MMC_DATA_WRITE)
		dma_unmap_sg(mmc_dev(priv->mmc), req->data->sg,
			     req->data->sg_len, DMA_TO_DEVICE);
	else
		dma_unmap_sg(mmc_dev(priv->mmc), req->data->sg,
			     req->data->sg_len, DMA_FROM_DEVICE);

	/* Check if the DMA ISR returned a data error */
	if ((req->cmd->error) || (req->data->error))
		mmc_request_done(priv->mmc, req);
	else {
		wmt_mci_read_response(priv->mmc);
		if (!req->data->stop) {
			/* single-block read/write requests end here */
			mmc_request_done(priv->mmc, req);
		} else {
			/*
			 * we change the priv->cmd variable so the response is
			 * stored in the stop struct rather than the original
			 * calling command struct
			 */
			priv->comp_cmd = &priv->cmdcomp;
			init_completion(priv->comp_cmd);
			priv->cmd = req->data->stop;
			wmt_mci_send_command(priv->mmc, req->data->stop->opcode,
					     7, req->data->stop->arg, 9);
			wmt_mci_start_command(priv);
		}
	}
}

static irqreturn_t wmt_mci_dma_isr(int irq_num, void *data)
{
	struct wmt_mci_priv *priv;

	int status;

	priv = (struct wmt_mci_priv *)data;

	status = readl(priv->sdmmc_base + SDDMA_CCR) & 0x0F;

	if (status != DMA_CCR_EVT_SUCCESS) {
		dev_err(priv->dev, "DMA Error: Status = %d\n", status);
		priv->req->data->error = -ETIMEDOUT;
		complete(priv->comp_dma);
		return IRQ_HANDLED;
	}

	priv->req->data->error = 0;

	wmt_mci_disable_dma(priv);

	complete(priv->comp_dma);

	if (priv->comp_cmd) {
		if (completion_done(priv->comp_cmd)) {
			/*
			 * If the command (regular) interrupt has already
			 * completed, finish off the request; otherwise we
			 * wait for the command interrupt and finish from
			 * there.
			 */
			wmt_complete_data_request(priv);
		}
	}

	return IRQ_HANDLED;
}

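/*
 * Regular (non-DMA) controller interrupt: reports card insertion,
 * command/response completion and response/data timeouts.  Data
 * requests are finished either here or in the DMA ISR, whichever
 * completes last.
 */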
static irqreturn_t wmt_mci_regular_isr(int irq_num, void *data)
{
	struct wmt_mci_priv *priv;
	u32 status0;
	u32 status1;
	u32 status2;
	u32 reg_tmp;
	int cmd_done;

	priv = (struct wmt_mci_priv *)data;
	cmd_done = 0;
	status0 = readb(priv->sdmmc_base + SDMMC_STS0);
	status1 = readb(priv->sdmmc_base + SDMMC_STS1);
	status2 = readb(priv->sdmmc_base + SDMMC_STS2);

	/* Check for card insertion */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_INTMASK0);
	if ((reg_tmp & INT0_DI_INT_EN) && (status0 & STS0_DEVICE_INS)) {
		mmc_detect_change(priv->mmc, 0);
		if (priv->cmd)
			priv->cmd->error = -ETIMEDOUT;
		if (priv->comp_cmd)
			complete(priv->comp_cmd);
		if (priv->comp_dma) {
			wmt_mci_disable_dma(priv);
			complete(priv->comp_dma);
		}
		writeb(STS0_DEVICE_INS, priv->sdmmc_base + SDMMC_STS0);
		return IRQ_HANDLED;
	}

	if ((!priv->req->data) ||
	    ((priv->req->data->stop) && (priv->cmd == priv->req->data->stop))) {
		/* handle non-data & stop_transmission requests */
		if (status1 & STS1_CMDRSP_DONE) {
			priv->cmd->error = 0;
			cmd_done = 1;
		} else if ((status1 & STS1_RSP_TIMEOUT) ||
			   (status1 & STS1_DATA_TIMEOUT)) {
			priv->cmd->error = -ETIMEDOUT;
			cmd_done = 1;
		}

		if (cmd_done) {
			priv->comp_cmd = NULL;

			if (!priv->cmd->error)
				wmt_mci_read_response(priv->mmc);

			priv->cmd = NULL;

			mmc_request_done(priv->mmc, priv->req);
		}
	} else {
		/* handle data requests */
		if (status1 & STS1_CMDRSP_DONE) {
			if (priv->cmd)
				priv->cmd->error = 0;
			if (priv->comp_cmd)
				complete(priv->comp_cmd);
		}

		if ((status1 & STS1_RSP_TIMEOUT) ||
		    (status1 & STS1_DATA_TIMEOUT)) {
			if (priv->cmd)
				priv->cmd->error = -ETIMEDOUT;
			if (priv->comp_cmd)
				complete(priv->comp_cmd);
			if (priv->comp_dma) {
				wmt_mci_disable_dma(priv);
				complete(priv->comp_dma);
			}
		}

		if (priv->comp_dma) {
			/*
			 * If the dma interrupt has already completed, finish
			 * off the request; otherwise we wait for the DMA
			 * interrupt and finish from there.
			 */
			if (completion_done(priv->comp_dma))
				wmt_complete_data_request(priv);
		}
	}

	writeb(status0, priv->sdmmc_base + SDMMC_STS0);
	writeb(status1, priv->sdmmc_base + SDMMC_STS1);
	writeb(status2, priv->sdmmc_base + SDMMC_STS2);

	return IRQ_HANDLED;
}

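/*
 * Bring the controller to a known state: soft reset, clear the
 * response FIFO and status bits, enable card-detect and command/data
 * interrupts, and drop the bus clock to the 400 kHz identification
 * rate.
 */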
static void wmt_reset_hardware(struct mmc_host *mmc)
{
	struct wmt_mci_priv *priv;
	u32 reg_tmp;

	priv = mmc_priv(mmc);

	/* reset controller */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
	writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base + SDMMC_BUSMODE);

	/* reset response FIFO */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR);
	writeb(reg_tmp | CTLR_FIFO_RESET, priv->sdmmc_base + SDMMC_CTLR);

	/* enable GPI pin to detect card */
	writew(BLKL_INT_ENABLE | BLKL_GPI_CD, priv->sdmmc_base + SDMMC_BLKLEN);

	/* clear interrupt status */
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS0);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS1);

	/* setup interrupts */
	writeb(INT0_CD_INT_EN | INT0_DI_INT_EN, priv->sdmmc_base +
	       SDMMC_INTMASK0);
	writeb(INT1_DATA_TOUT_INT_EN | INT1_CMD_RES_TRAN_DONE_INT_EN |
	       INT1_CMD_RES_TOUT_INT_EN, priv->sdmmc_base + SDMMC_INTMASK1);

	/* set the DMA timeout */
	writew(8191, priv->sdmmc_base + SDMMC_DMATIMEOUT);

	/* auto clock freezing enable */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_STS2);
	writeb(reg_tmp | STS2_DIS_FORCECLK, priv->sdmmc_base + SDMMC_STS2);

	/* set a default clock speed of 400 kHz */
	clk_set_rate(priv->clk_sdmmc, 400000);
}

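/*
 * Soft-reset and enable the PDMA engine.  Returns 0 on success or a
 * non-zero value if the enable bit did not stick (callers currently
 * ignore the result).
 */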
static int wmt_dma_init(struct mmc_host *mmc)
{
	struct wmt_mci_priv *priv;

	priv = mmc_priv(mmc);

	writel(DMA_GCR_SOFT_RESET, priv->sdmmc_base + SDDMA_GCR);
	writel(DMA_GCR_DMA_EN, priv->sdmmc_base + SDDMA_GCR);
	if ((readl(priv->sdmmc_base + SDDMA_GCR) & DMA_GCR_DMA_EN) != 0)
		return 0;
	else
		return 1;
}

static void wmt_dma_init_descriptor(struct wmt_dma_descriptor *desc,
		u16 req_count, u32 buffer_addr, u32 branch_addr, int end)
{
	desc->flags = 0x40000000 | req_count;
	if (end)
		desc->flags |= 0x80000000;
	desc->data_buffer_addr = buffer_addr;
	desc->branch_addr = branch_addr;
}

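/*
 * Point the PDMA engine at the descriptor chain and set the transfer
 * direction.  DMA_CCR_IF_TO_PERIPHERAL is 0, so for writes the
 * direction bit of the freshly cleared CCR is simply left at 0, while
 * reads set DMA_CCR_PERIPHERAL_TO_IF.
 */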
static void wmt_dma_config(struct mmc_host *mmc, u32 descaddr, u8 dir)
{
	struct wmt_mci_priv *priv;
	u32 reg_tmp;

	priv = mmc_priv(mmc);

	/* Enable DMA Interrupts */
	writel(DMA_IER_INT_EN, priv->sdmmc_base + SDDMA_IER);

	/* Write DMA Descriptor Pointer Register */
	writel(descaddr, priv->sdmmc_base + SDDMA_DESPR);

	writel(0x00, priv->sdmmc_base + SDDMA_CCR);

	if (dir == PDMA_WRITE) {
		reg_tmp = readl(priv->sdmmc_base + SDDMA_CCR);
		writel(reg_tmp & DMA_CCR_IF_TO_PERIPHERAL, priv->sdmmc_base +
		       SDDMA_CCR);
	} else {
		reg_tmp = readl(priv->sdmmc_base + SDDMA_CCR);
		writel(reg_tmp | DMA_CCR_PERIPHERAL_TO_IF, priv->sdmmc_base +
		       SDDMA_CCR);
	}
}

static void wmt_dma_start(struct wmt_mci_priv *priv)
{
	u32 reg_tmp;

	reg_tmp = readl(priv->sdmmc_base + SDDMA_CCR);
	writel(reg_tmp | DMA_CCR_RUN, priv->sdmmc_base + SDDMA_CCR);
}

static void wmt_mci_request(struct mmc_host *mmc, struct mmc_request *req)
{
	struct wmt_mci_priv *priv;
	struct wmt_dma_descriptor *desc;
	u8 command;
	u8 cmdtype;
	u32 arg;
	u8 rsptype;
	u32 reg_tmp;

	struct scatterlist *sg;
	int i;
	int sg_cnt;
	int offset;
	u32 dma_address;
	int desc_cnt;

	priv = mmc_priv(mmc);
	priv->req = req;

	/*
	 * Track the active command in priv->cmd so the ISRs know which
	 * command's resp[] to fill in; on multi-block requests it is later
	 * re-pointed at the stop command.
	 */
	priv->cmd = req->cmd;

	command = req->cmd->opcode;
	arg = req->cmd->arg;
	rsptype = mmc_resp_type(req->cmd);
	cmdtype = 0;

	/* rsptype=7 only valid for SPI commands - should be =2 for SD */
	if (rsptype == 7)
		rsptype = 2;
	/* rsptype=21 is R1, convert to the controller's encoding */
	if (rsptype == 21)
		rsptype = 9;

	if (!req->data) {
		wmt_mci_send_command(mmc, command, cmdtype, arg, rsptype);
		wmt_mci_start_command(priv);
		/* completion is now handled in the regular_isr() */
	}
	if (req->data) {
		priv->comp_cmd = &priv->cmdcomp;
		init_completion(priv->comp_cmd);

		wmt_dma_init(mmc);

		/* set controller data length */
		reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
		writew((reg_tmp & 0xF800) | (req->data->blksz - 1),
		       priv->sdmmc_base + SDMMC_BLKLEN);

		/* set controller block count */
		writew(req->data->blocks, priv->sdmmc_base + SDMMC_BLKCNT);

		desc = (struct wmt_dma_descriptor *)priv->dma_desc_buffer;

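		/*
		 * Controller command types as used by this driver: 1/3 for
		 * single/multi-block writes, 2/4 for single/multi-block
		 * reads; 0 means no data, and the stop command is issued
		 * with type 7 from wmt_complete_data_request().
		 */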
		if (req->data->flags & MMC_DATA_WRITE) {
			sg_cnt = dma_map_sg(mmc_dev(mmc), req->data->sg,
					    req->data->sg_len, DMA_TO_DEVICE);
			cmdtype = 1;
			if (req->data->blocks > 1)
				cmdtype = 3;
		} else {
			sg_cnt = dma_map_sg(mmc_dev(mmc), req->data->sg,
					    req->data->sg_len, DMA_FROM_DEVICE);
			cmdtype = 2;
			if (req->data->blocks > 1)
				cmdtype = 4;
		}

		dma_address = priv->dma_desc_device_addr + 16;
		desc_cnt = 0;

		for_each_sg(req->data->sg, sg, sg_cnt, i) {
			offset = 0;
			while (offset < sg_dma_len(sg)) {
				wmt_dma_init_descriptor(desc, req->data->blksz,
						sg_dma_address(sg) + offset,
						dma_address, 0);
				desc++;
				desc_cnt++;
				offset += req->data->blksz;
				dma_address += 16;
				if (desc_cnt == req->data->blocks)
					break;
			}
		}
		desc--;
		desc->flags |= 0x80000000;

		if (req->data->flags & MMC_DATA_WRITE)
			wmt_dma_config(mmc, priv->dma_desc_device_addr,
				       PDMA_WRITE);
		else
			wmt_dma_config(mmc, priv->dma_desc_device_addr,
				       PDMA_READ);

		wmt_mci_send_command(mmc, command, cmdtype, arg, rsptype);

		priv->comp_dma = &priv->datacomp;
		init_completion(priv->comp_dma);

		wmt_dma_start(priv);
		wmt_mci_start_command(priv);
	}
}

static void wmt_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct wmt_mci_priv *priv;
	u32 busmode, extctrl;

	priv = mmc_priv(mmc);

	if (ios->power_mode == MMC_POWER_UP) {
		wmt_reset_hardware(mmc);

		wmt_set_sd_power(priv, WMT_SD_POWER_ON);
	}
	if (ios->power_mode == MMC_POWER_OFF)
		wmt_set_sd_power(priv, WMT_SD_POWER_OFF);

	if (ios->clock != 0)
		clk_set_rate(priv->clk_sdmmc, ios->clock);

	busmode = readb(priv->sdmmc_base + SDMMC_BUSMODE);
	extctrl = readb(priv->sdmmc_base + SDMMC_EXTCTRL);

	busmode &= ~(BM_EIGHTBIT_MODE | BM_FOURBIT_MODE);
	extctrl &= ~EXT_EIGHTBIT;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_8:
		busmode |= BM_EIGHTBIT_MODE;
		extctrl |= EXT_EIGHTBIT;
		break;
	case MMC_BUS_WIDTH_4:
		busmode |= BM_FOURBIT_MODE;
		break;
	case MMC_BUS_WIDTH_1:
		break;
	}

	writeb(busmode, priv->sdmmc_base + SDMMC_BUSMODE);
	writeb(extctrl, priv->sdmmc_base + SDMMC_EXTCTRL);
}

static int wmt_mci_get_ro(struct mmc_host *mmc)
{
	struct wmt_mci_priv *priv = mmc_priv(mmc);

	return !(readb(priv->sdmmc_base + SDMMC_STS0) & STS0_WRITE_PROTECT);
}

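/*
 * Card detect is sampled from the STS0 GPI bit; the "cd-inverted" DT
 * property (via priv->cd_inverted) flips the polarity for boards wired
 * the other way around.
 */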
static int wmt_mci_get_cd(struct mmc_host *mmc)
{
	struct wmt_mci_priv *priv = mmc_priv(mmc);
	u32 cd = (readb(priv->sdmmc_base + SDMMC_STS0) & STS0_CD_GPI) >> 3;

	return !(cd ^ priv->cd_inverted);
}

static const struct mmc_host_ops wmt_mci_ops = {
	.request = wmt_mci_request,
	.set_ios = wmt_mci_set_ios,
	.get_ro = wmt_mci_get_ro,
	.get_cd = wmt_mci_get_cd,
};

/* Controller capabilities */
static struct wmt_mci_caps wm8505_caps = {
	.f_min = 390425,
	.f_max = 50000000,
	.ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34,
	.caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MMC_HIGHSPEED |
		MMC_CAP_SD_HIGHSPEED,
	.max_seg_size = 65024,
	.max_segs = 128,
	.max_blk_size = 2048,
};

static const struct of_device_id wmt_mci_dt_ids[] = {
	{ .compatible = "wm,wm8505-sdhc", .data = &wm8505_caps },
	{ /* Sentinel */ },
};

static int wmt_mci_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct wmt_mci_priv *priv;
	struct device_node *np = pdev->dev.of_node;
	const struct of_device_id *of_id =
		of_match_device(wmt_mci_dt_ids, &pdev->dev);
	const struct wmt_mci_caps *wmt_caps;
	int ret;
	int regular_irq, dma_irq;

	if (!of_id || !of_id->data) {
		dev_err(&pdev->dev, "Controller capabilities data missing\n");
		return -EFAULT;
	}

	wmt_caps = of_id->data;

	if (!np) {
		dev_err(&pdev->dev, "Missing SDMMC description in devicetree\n");
		return -EFAULT;
	}

	regular_irq = irq_of_parse_and_map(np, 0);
	dma_irq = irq_of_parse_and_map(np, 1);

	if (!regular_irq || !dma_irq) {
		dev_err(&pdev->dev, "Getting IRQs failed!\n");
		ret = -ENXIO;
		goto fail1;
	}

	mmc = mmc_alloc_host(sizeof(struct wmt_mci_priv), &pdev->dev);
	if (!mmc) {
		dev_err(&pdev->dev, "Failed to allocate mmc_host\n");
		ret = -ENOMEM;
		goto fail1;
	}

	mmc->ops = &wmt_mci_ops;
	mmc->f_min = wmt_caps->f_min;
	mmc->f_max = wmt_caps->f_max;
	mmc->ocr_avail = wmt_caps->ocr_avail;
	mmc->caps = wmt_caps->caps;

	mmc->max_seg_size = wmt_caps->max_seg_size;
	mmc->max_segs = wmt_caps->max_segs;
	mmc->max_blk_size = wmt_caps->max_blk_size;

	mmc->max_req_size = (16 * 512 * mmc->max_segs);
	mmc->max_blk_count = mmc->max_req_size / 512;

	priv = mmc_priv(mmc);
	priv->mmc = mmc;
	priv->dev = &pdev->dev;

	priv->power_inverted = 0;
	priv->cd_inverted = 0;

	if (of_get_property(np, "sdon-inverted", NULL))
		priv->power_inverted = 1;
	if (of_get_property(np, "cd-inverted", NULL))
		priv->cd_inverted = 1;

	priv->sdmmc_base = of_iomap(np, 0);
	if (!priv->sdmmc_base) {
		dev_err(&pdev->dev, "Failed to map IO space\n");
		ret = -ENOMEM;
		goto fail2;
	}

	priv->irq_regular = regular_irq;
	priv->irq_dma = dma_irq;

	ret = request_irq(regular_irq, wmt_mci_regular_isr, 0, "sdmmc", priv);
	if (ret) {
		dev_err(&pdev->dev, "Register regular IRQ fail\n");
		goto fail3;
	}

	ret = request_irq(dma_irq, wmt_mci_dma_isr, 0, "sdmmc", priv);
	if (ret) {
		dev_err(&pdev->dev, "Register DMA IRQ fail\n");
		goto fail4;
	}

	/* alloc some DMA buffers for descriptors/transfers */
	priv->dma_desc_buffer = dma_alloc_coherent(&pdev->dev,
						   mmc->max_blk_count * 16,
						   &priv->dma_desc_device_addr,
						   GFP_KERNEL);
	if (!priv->dma_desc_buffer) {
		dev_err(&pdev->dev, "DMA alloc fail\n");
		ret = -ENOMEM;
		goto fail5;
	}

	platform_set_drvdata(pdev, mmc);

	priv->clk_sdmmc = of_clk_get(np, 0);
	if (IS_ERR(priv->clk_sdmmc)) {
		dev_err(&pdev->dev, "Error getting clock\n");
		ret = PTR_ERR(priv->clk_sdmmc);
		goto fail5;
	}

	ret = clk_prepare_enable(priv->clk_sdmmc);
	if (ret)
		goto fail6;

	/* configure the controller to a known 'ready' state */
	wmt_reset_hardware(mmc);

	ret = mmc_add_host(mmc);
	if (ret)
		goto fail7;

	dev_info(&pdev->dev, "WMT SDHC Controller initialized\n");

	return 0;
fail7:
	clk_disable_unprepare(priv->clk_sdmmc);
fail6:
	clk_put(priv->clk_sdmmc);
fail5:
	free_irq(dma_irq, priv);
fail4:
	free_irq(regular_irq, priv);
fail3:
	iounmap(priv->sdmmc_base);
fail2:
	mmc_free_host(mmc);
fail1:
	return ret;
}

static int wmt_mci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct wmt_mci_priv *priv;
	struct resource *res;
	u32 reg_tmp;

	mmc = platform_get_drvdata(pdev);
	priv = mmc_priv(mmc);

	/* reset SD controller */
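	/*
	 * The 0xA000 mask cleared from BLKLEN below appears to correspond to
	 * BLKL_GPI_CD | BLKL_INT_ENABLE (the bits the resume path sets), and
	 * writing 0xFF to STS0/STS1 acks any pending interrupt status.
	 */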
	reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
	/* BUSMODE is a byte-wide register; use writeb() as the suspend path does */
	writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base + SDMMC_BUSMODE);
	reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
	writew(reg_tmp & ~(0xA000), priv->sdmmc_base + SDMMC_BLKLEN);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS0);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS1);

	/* release the dma buffers */
	dma_free_coherent(&pdev->dev, priv->mmc->max_blk_count * 16,
			  priv->dma_desc_buffer, priv->dma_desc_device_addr);

	mmc_remove_host(mmc);

	free_irq(priv->irq_regular, priv);
	free_irq(priv->irq_dma, priv);

	iounmap(priv->sdmmc_base);

	clk_disable_unprepare(priv->clk_sdmmc);
	clk_put(priv->clk_sdmmc);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));

	mmc_free_host(mmc);

	dev_info(&pdev->dev, "WMT MCI device removed\n");

	return 0;
}

#ifdef CONFIG_PM
static int wmt_mci_suspend(struct device *dev)
{
	u32 reg_tmp;
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct wmt_mci_priv *priv;

	if (!mmc)
		return 0;

	priv = mmc_priv(mmc);
	reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
	writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base + SDMMC_BUSMODE);

	reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
	writew(reg_tmp & 0x5FFF, priv->sdmmc_base + SDMMC_BLKLEN);

	writeb(0xFF, priv->sdmmc_base + SDMMC_STS0);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS1);

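	/*
	 * The clock was prepared in probe with clk_prepare_enable(); only the
	 * enable count is dropped here, so resume can pair this with a plain
	 * clk_enable().
	 */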
	clk_disable(priv->clk_sdmmc);
	return 0;
}

static int wmt_mci_resume(struct device *dev)
{
	u32 reg_tmp;
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct wmt_mci_priv *priv;

	if (mmc) {
		priv = mmc_priv(mmc);
		clk_enable(priv->clk_sdmmc);

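		/*
		 * Mirror the soft reset done at suspend, then re-enable the
		 * GPI card-detect path (BLKL_GPI_CD | BLKL_INT_ENABLE and
		 * INT0_DI_INT_EN) so card insertion/removal is reported
		 * again.
		 */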
		reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
		writeb(reg_tmp | BM_SOFT_RESET,
		       priv->sdmmc_base + SDMMC_BUSMODE);

		reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
		writew(reg_tmp | (BLKL_GPI_CD | BLKL_INT_ENABLE),
		       priv->sdmmc_base + SDMMC_BLKLEN);

		reg_tmp = readb(priv->sdmmc_base + SDMMC_INTMASK0);
		writeb(reg_tmp | INT0_DI_INT_EN,
		       priv->sdmmc_base + SDMMC_INTMASK0);
	}

	return 0;
}

static const struct dev_pm_ops wmt_mci_pm = {
	.suspend = wmt_mci_suspend,
	.resume = wmt_mci_resume,
};
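
/*
 * Only the legacy .suspend/.resume system-sleep hooks are populated above.
 * An equivalent (untested) sketch using the generic helper macro would be:
 *
 *	static const struct dev_pm_ops wmt_mci_pm = {
 *		SET_SYSTEM_SLEEP_PM_OPS(wmt_mci_suspend, wmt_mci_resume)
 *	};
 *
 * which would also route freeze/thaw/poweroff/restore to the same callbacks.
 */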

#define wmt_mci_pm_ops (&wmt_mci_pm)

#else	/* !CONFIG_PM */

#define wmt_mci_pm_ops NULL

#endif

static struct platform_driver wmt_mci_driver = {
	.probe = wmt_mci_probe,
	.remove = wmt_mci_remove,
	.driver = {
		.name = DRIVER_NAME,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.pm = wmt_mci_pm_ops,
		.of_match_table = wmt_mci_dt_ids,
	},
};

module_platform_driver(wmt_mci_driver);

MODULE_DESCRIPTION("Wondermedia MMC/SD Driver");
MODULE_AUTHOR("Tony Prisk");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, wmt_mci_dt_ids);