// SPDX-License-Identifier: GPL-2.0-only
/*
 * Atmel MultiMedia Card Interface driver
 *
 * Copyright (C) 2004-2008 Atmel Corporation
 */
#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/types.h>

#include <linux/mmc/host.h>
#include <linux/mmc/sdio.h>

#include <linux/atmel-mci.h>
#include <linux/atmel_pdc.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pinctrl/consumer.h>

#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/unaligned.h>

/*
 * Superset of MCI IP registers integrated in Atmel AT91 Processor
 * Registers and bitfields marked with [2] are only available in MCI2
 */

/* MCI Register Definitions */
#define ATMCI_CR			0x0000	/* Control */
#define ATMCI_CR_MCIEN			BIT(0)	/* MCI Enable */
#define ATMCI_CR_MCIDIS			BIT(1)	/* MCI Disable */
#define ATMCI_CR_PWSEN			BIT(2)	/* Power Save Enable */
#define ATMCI_CR_PWSDIS			BIT(3)	/* Power Save Disable */
#define ATMCI_CR_SWRST			BIT(7)	/* Software Reset */
#define ATMCI_MR			0x0004	/* Mode */
#define ATMCI_MR_CLKDIV(x)		((x) << 0)	/* Clock Divider */
#define ATMCI_MR_PWSDIV(x)		((x) << 8)	/* Power Saving Divider */
#define ATMCI_MR_RDPROOF		BIT(11)	/* Read Proof */
#define ATMCI_MR_WRPROOF		BIT(12)	/* Write Proof */
#define ATMCI_MR_PDCFBYTE		BIT(13)	/* Force Byte Transfer */
#define ATMCI_MR_PDCPADV		BIT(14)	/* Padding Value */
#define ATMCI_MR_PDCMODE		BIT(15)	/* PDC-oriented Mode */
#define ATMCI_MR_CLKODD(x)		((x) << 16)	/* LSB of Clock Divider */
#define ATMCI_DTOR			0x0008	/* Data Timeout */
#define ATMCI_DTOCYC(x)			((x) << 0)	/* Data Timeout Cycles */
#define ATMCI_DTOMUL(x)			((x) << 4)	/* Data Timeout Multiplier */
#define ATMCI_SDCR			0x000c	/* SD Card / SDIO */
#define ATMCI_SDCSEL_SLOT_A		(0 << 0)	/* Select SD slot A */
#define ATMCI_SDCSEL_SLOT_B		(1 << 0)	/* Select SD slot B */
#define ATMCI_SDCSEL_MASK		(3 << 0)
#define ATMCI_SDCBUS_1BIT		(0 << 6)	/* 1-bit data bus */
#define ATMCI_SDCBUS_4BIT		(2 << 6)	/* 4-bit data bus */
#define ATMCI_SDCBUS_8BIT		(3 << 6)	/* 8-bit data bus[2] */
#define ATMCI_SDCBUS_MASK		(3 << 6)
#define ATMCI_ARGR			0x0010	/* Command Argument */
#define ATMCI_CMDR			0x0014	/* Command */
#define ATMCI_CMDR_CMDNB(x)		((x) << 0)	/* Command Opcode */
#define ATMCI_CMDR_RSPTYP_NONE		(0 << 6)	/* No response */
#define ATMCI_CMDR_RSPTYP_48BIT		(1 << 6)	/* 48-bit response */
#define ATMCI_CMDR_RSPTYP_136BIT	(2 << 6)	/* 136-bit response */
#define ATMCI_CMDR_SPCMD_INIT		(1 << 8)	/* Initialization command */
#define ATMCI_CMDR_SPCMD_SYNC		(2 << 8)	/* Synchronized command */
#define ATMCI_CMDR_SPCMD_INT		(4 << 8)	/* Interrupt command */
#define ATMCI_CMDR_SPCMD_INTRESP	(5 << 8)	/* Interrupt response */
#define ATMCI_CMDR_OPDCMD		(1 << 11)	/* Open Drain */
#define ATMCI_CMDR_MAXLAT_5CYC		(0 << 12)	/* Max latency 5 cycles */
#define ATMCI_CMDR_MAXLAT_64CYC		(1 << 12)	/* Max latency 64 cycles */
#define ATMCI_CMDR_START_XFER		(1 << 16)	/* Start data transfer */
#define ATMCI_CMDR_STOP_XFER		(2 << 16)	/* Stop data transfer */
#define ATMCI_CMDR_TRDIR_WRITE		(0 << 18)	/* Write data */
#define ATMCI_CMDR_TRDIR_READ		(1 << 18)	/* Read data */
#define ATMCI_CMDR_BLOCK		(0 << 19)	/* Single-block transfer */
#define ATMCI_CMDR_MULTI_BLOCK		(1 << 19)	/* Multi-block transfer */
#define ATMCI_CMDR_STREAM		(2 << 19)	/* MMC Stream transfer */
#define ATMCI_CMDR_SDIO_BYTE		(4 << 19)	/* SDIO Byte transfer */
#define ATMCI_CMDR_SDIO_BLOCK		(5 << 19)	/* SDIO Block transfer */
#define ATMCI_CMDR_SDIO_SUSPEND		(1 << 24)	/* SDIO Suspend Command */
#define ATMCI_CMDR_SDIO_RESUME		(2 << 24)	/* SDIO Resume Command */
#define ATMCI_BLKR			0x0018	/* Block */
#define ATMCI_BCNT(x)			((x) << 0)	/* Data Block Count */
#define ATMCI_BLKLEN(x)			((x) << 16)	/* Data Block Length */
#define ATMCI_CSTOR			0x001c	/* Completion Signal Timeout[2] */
#define ATMCI_CSTOCYC(x)		((x) << 0)	/* CST cycles */
#define ATMCI_CSTOMUL(x)		((x) << 4)	/* CST multiplier */
#define ATMCI_RSPR			0x0020	/* Response 0 */
#define ATMCI_RSPR1			0x0024	/* Response 1 */
#define ATMCI_RSPR2			0x0028	/* Response 2 */
#define ATMCI_RSPR3			0x002c	/* Response 3 */
#define ATMCI_RDR			0x0030	/* Receive Data */
#define ATMCI_TDR			0x0034	/* Transmit Data */
#define ATMCI_SR			0x0040	/* Status */
#define ATMCI_IER			0x0044	/* Interrupt Enable */
#define ATMCI_IDR			0x0048	/* Interrupt Disable */
#define ATMCI_IMR			0x004c	/* Interrupt Mask */
#define ATMCI_CMDRDY			BIT(0)	/* Command Ready */
#define ATMCI_RXRDY			BIT(1)	/* Receiver Ready */
#define ATMCI_TXRDY			BIT(2)	/* Transmitter Ready */
#define ATMCI_BLKE			BIT(3)	/* Data Block Ended */
#define ATMCI_DTIP			BIT(4)	/* Data Transfer In Progress */
#define ATMCI_NOTBUSY			BIT(5)	/* Data Not Busy */
#define ATMCI_ENDRX			BIT(6)	/* End of RX Buffer */
#define ATMCI_ENDTX			BIT(7)	/* End of TX Buffer */
#define ATMCI_SDIOIRQA			BIT(8)	/* SDIO IRQ in slot A */
#define ATMCI_SDIOIRQB			BIT(9)	/* SDIO IRQ in slot B */
#define ATMCI_SDIOWAIT			BIT(12)	/* SDIO Read Wait Operation Status */
#define ATMCI_CSRCV			BIT(13)	/* CE-ATA Completion Signal Received */
#define ATMCI_RXBUFF			BIT(14)	/* RX Buffer Full */
#define ATMCI_TXBUFE			BIT(15)	/* TX Buffer Empty */
#define ATMCI_RINDE			BIT(16)	/* Response Index Error */
#define ATMCI_RDIRE			BIT(17)	/* Response Direction Error */
#define ATMCI_RCRCE			BIT(18)	/* Response CRC Error */
#define ATMCI_RENDE			BIT(19)	/* Response End Bit Error */
#define ATMCI_RTOE			BIT(20)	/* Response Time-Out Error */
#define ATMCI_DCRCE			BIT(21)	/* Data CRC Error */
#define ATMCI_DTOE			BIT(22)	/* Data Time-Out Error */
#define ATMCI_CSTOE			BIT(23)	/* Completion Signal Time-out Error */
#define ATMCI_BLKOVRE			BIT(24)	/* DMA Block Overrun Error */
#define ATMCI_DMADONE			BIT(25)	/* DMA Transfer Done */
#define ATMCI_FIFOEMPTY			BIT(26)	/* FIFO Empty Flag */
#define ATMCI_XFRDONE			BIT(27)	/* Transfer Done Flag */
#define ATMCI_ACKRCV			BIT(28)	/* Boot Operation Acknowledge Received */
#define ATMCI_ACKRCVE			BIT(29)	/* Boot Operation Acknowledge Error */
#define ATMCI_OVRE			BIT(30)	/* RX Overrun Error */
#define ATMCI_UNRE			BIT(31)	/* TX Underrun Error */
#define ATMCI_DMA			0x0050	/* DMA Configuration[2] */
#define ATMCI_DMA_OFFSET(x)		((x) << 0)	/* DMA Write Buffer Offset */
#define ATMCI_DMA_CHKSIZE(x)		((x) << 4)	/* DMA Channel Read and Write Chunk Size */
#define ATMCI_DMAEN			BIT(8)	/* DMA Hardware Handshaking Enable */
#define ATMCI_CFG			0x0054	/* Configuration[2] */
#define ATMCI_CFG_FIFOMODE_1DATA	BIT(0)	/* MCI Internal FIFO control mode */
#define ATMCI_CFG_FERRCTRL_COR		BIT(4)	/* Flow Error flag reset control mode */
#define ATMCI_CFG_HSMODE		BIT(8)	/* High Speed Mode */
#define ATMCI_CFG_LSYNC			BIT(12)	/* Synchronize on the last block */
#define ATMCI_WPMR			0x00e4	/* Write Protection Mode[2] */
#define ATMCI_WP_EN			BIT(0)	/* WP Enable */
#define ATMCI_WP_KEY			(0x4d4349 << 8)	/* WP Key */
#define ATMCI_WPSR			0x00e8	/* Write Protection Status[2] */
#define ATMCI_GET_WP_VS(x)		((x) & 0x0f)
#define ATMCI_GET_WP_VSRC(x)		(((x) >> 8) & 0xffff)
#define ATMCI_VERSION			0x00FC	/* Version */
#define ATMCI_FIFO_APERTURE		0x0200	/* FIFO Aperture[2] */

/* This does not include the FIFO Aperture on MCI2 */
#define ATMCI_REGS_SIZE		0x100

/* Register access macros */
#define atmci_readl(port, reg)			\
	__raw_readl((port)->regs + reg)
#define atmci_writel(port, reg, value)		\
	__raw_writel((value), (port)->regs + reg)
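/*
 * Illustrative usage of the accessors above (a sketch, not an excerpt from
 * the original driver): read the status register with
 * "status = atmci_readl(host, ATMCI_SR);" and mask every interrupt source
 * with "atmci_writel(host, ATMCI_IDR, ~0UL);".
 */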

#define ATMCI_CMD_TIMEOUT_MS	2000
#define AUTOSUSPEND_DELAY	50

#define ATMCI_DATA_ERROR_FLAGS	(ATMCI_DCRCE | ATMCI_DTOE | ATMCI_OVRE | ATMCI_UNRE)
#define ATMCI_DMA_THRESHOLD	16
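/*
 * ATMCI_DATA_ERROR_FLAGS groups the SR error bits consulted after a data
 * transfer (CRC, timeout, overrun, underrun). ATMCI_DMA_THRESHOLD is
 * presumably the transfer size in bytes below which DMA setup is not worth
 * the cost and PIO is used instead.
 */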

enum {
	EVENT_CMD_RDY = 0,
	EVENT_XFER_COMPLETE,
	EVENT_NOTBUSY,
	EVENT_DATA_ERROR,
};

enum atmel_mci_state {
	STATE_IDLE = 0,
	STATE_SENDING_CMD,
	STATE_DATA_XFER,
	STATE_WAITING_NOTBUSY,
	STATE_SENDING_STOP,
	STATE_END_REQUEST,
};

enum atmci_xfer_dir {
	XFER_RECEIVE = 0,
	XFER_TRANSMIT,
};

enum atmci_pdc_buf {
	PDC_FIRST_BUF = 0,
	PDC_SECOND_BUF,
};

struct atmel_mci_caps {
	bool	has_dma_conf_reg;
	bool	has_pdc;
	bool	has_cfg_reg;
	bool	has_cstor_reg;
	bool	has_highspeed;
	bool	has_rwproof;
	bool	has_odd_clk_div;
	bool	has_bad_data_ordering;
	bool	need_reset_after_xfer;
	bool	need_blksz_mul_4;
	bool	need_notbusy_for_read_ops;
};

struct atmel_mci_dma {
	struct dma_chan			*chan;
	struct dma_async_tx_descriptor	*data_desc;
};

/**
 * struct atmel_mci - MMC controller state shared between all slots
 * @lock: Spinlock protecting the queue and associated data.
 * @regs: Pointer to MMIO registers.
 * @sg: Scatterlist entry currently being processed by PIO or PDC code.
 * @sg_len: Size of the scatterlist.
 * @pio_offset: Offset into the current scatterlist entry.
 * @buffer: Buffer used if we don't have the r/w proof capability.
 *	There is not enough time to switch PDC buffers, so a single
 *	buffer has to be used for the whole transaction.
 * @buf_size: Size of the buffer.
 * @buf_phys_addr: Buffer address needed for PDC.
 * @cur_slot: The slot which is currently using the controller.
 * @mrq: The request currently being processed on @cur_slot,
 *	or NULL if the controller is idle.
 * @cmd: The command currently being sent to the card, or NULL.
 * @data: The data currently being transferred, or NULL if no data
 *	transfer is in progress.
 * @data_size: just data->blocks * data->blksz.
 * @dma: DMA client state.
 * @data_chan: DMA channel being used for the current data transfer.
 * @dma_conf: Configuration for the DMA slave.
 * @cmd_status: Snapshot of SR taken upon completion of the current
 *	command. Only valid when EVENT_CMD_COMPLETE is pending.
 * @data_status: Snapshot of SR taken upon completion of the current
 *	data transfer. Only valid when EVENT_DATA_COMPLETE or
 *	EVENT_DATA_ERROR is pending.
 * @stop_cmdr: Value to be loaded into CMDR when the stop command is
 *	to be sent.
 * @tasklet: Tasklet running the request state machine.
 * @pending_events: Bitmask of events flagged by the interrupt handler
 *	to be processed by the tasklet.
 * @completed_events: Bitmask of events which the state machine has
 *	processed.
 * @state: Tasklet state.
 * @queue: List of slots waiting for access to the controller.
 * @need_clock_update: Update the clock rate before the next request.
 * @need_reset: Reset controller before next request.
 * @timer: Software timeout timer, used in case the data timeout error
 *	flag never rises.
 * @mode_reg: Value of the MR register.
 * @cfg_reg: Value of the CFG register.
 * @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus
 *	rate and timeout calculations.
 * @mapbase: Physical address of the MMIO registers.
 * @mck: The peripheral bus clock hooked up to the MMC controller.
 * @pdev: Platform device associated with the MMC controller.
 * @slot: Slots sharing this MMC controller.
 * @caps: MCI capabilities depending on MCI version.
 * @prepare_data: function to setup MCI before data transfer which
 *	depends on MCI capabilities.
 * @submit_data: function to start data transfer which depends on MCI
 *	capabilities.
 * @stop_transfer: function to stop data transfer which depends on MCI
 *	capabilities.
 *
 * Locking
 * =======
 *
 * @lock is a softirq-safe spinlock protecting @queue as well as
 * @cur_slot, @mrq and @state. These must always be updated
 * at the same time while holding @lock.
 *
 * @lock also protects mode_reg and need_clock_update since these are
 * used to synchronize mode register updates with the queue
 * processing.
 *
 * The @mrq field of struct atmel_mci_slot is also protected by @lock,
 * and must always be written at the same time as the slot is added to
 * @queue.
 *
 * @pending_events and @completed_events are accessed using atomic bit
 * operations, so they don't need any locking.
 *
 * None of the fields touched by the interrupt handler need any
 * locking. However, ordering is important: Before EVENT_DATA_ERROR or
 * EVENT_DATA_COMPLETE is set in @pending_events, all data-related
 * interrupts must be disabled and @data_status updated with a
 * snapshot of SR. Similarly, before EVENT_CMD_COMPLETE is set, the
 * CMDRDY interrupt must be disabled and @cmd_status updated with a
 * snapshot of SR, and before EVENT_XFER_COMPLETE can be set, the
 * bytes_xfered field of @data must be written. This is ensured by
 * using barriers.
 */
struct atmel_mci {
	spinlock_t		lock;
	void __iomem		*regs;

	struct scatterlist	*sg;
	unsigned int		sg_len;
	unsigned int		pio_offset;
	unsigned int		*buffer;
	unsigned int		buf_size;
	dma_addr_t		buf_phys_addr;

	struct atmel_mci_slot	*cur_slot;
	struct mmc_request	*mrq;
	struct mmc_command	*cmd;
	struct mmc_data		*data;
	unsigned int		data_size;

	struct atmel_mci_dma	dma;
	struct dma_chan		*data_chan;
	struct dma_slave_config	dma_conf;

	u32			cmd_status;
	u32			data_status;
	u32			stop_cmdr;

	struct tasklet_struct	tasklet;
	unsigned long		pending_events;
	unsigned long		completed_events;
	enum atmel_mci_state	state;
	struct list_head	queue;

	bool			need_clock_update;
	bool			need_reset;
	struct timer_list	timer;
	u32			mode_reg;
	u32			cfg_reg;
	unsigned long		bus_hz;
	unsigned long		mapbase;
	struct clk		*mck;
	struct platform_device	*pdev;

	struct atmel_mci_slot	*slot[ATMCI_MAX_NR_SLOTS];

	struct atmel_mci_caps	caps;

	u32 (*prepare_data)(struct atmel_mci *host, struct mmc_data *data);
	void (*submit_data)(struct atmel_mci *host, struct mmc_data *data);
	void (*stop_transfer)(struct atmel_mci *host);
};
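/*
 * Illustrative locking pattern implied by the documentation above (a sketch,
 * not a quote from the original driver): @cur_slot, @mrq and @state are
 * updated together under the softirq-safe lock, e.g.
 *
 *	spin_lock_bh(&host->lock);
 *	host->cur_slot = slot;
 *	host->mrq = slot->mrq;
 *	host->state = STATE_SENDING_CMD;
 *	spin_unlock_bh(&host->lock);
 */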

/**
 * struct atmel_mci_slot - MMC slot state
 * @mmc: The mmc_host representing this slot.
 * @host: The MMC controller this slot is using.
 * @sdc_reg: Value of SDCR to be written before using this slot.
 * @sdio_irq: SDIO irq mask for this slot.
 * @mrq: mmc_request currently being processed or waiting to be
 *	processed, or NULL when the slot is idle.
 * @queue_node: List node for placing this node in the @queue list of
 *	&struct atmel_mci.
 * @clock: Clock rate configured by set_ios(). Protected by host->lock.
 * @flags: Random state bits associated with the slot.
 * @detect_pin: GPIO pin used for card detection, or negative if not
 *	available.
 * @wp_pin: GPIO pin used for card write protect sensing, or negative
 *	if not available.
 * @detect_is_active_high: The state of the detect pin when it is active.
 * @detect_timer: Timer used for debouncing @detect_pin interrupts.
 */
struct atmel_mci_slot {
	struct mmc_host		*mmc;
	struct atmel_mci	*host;

	u32			sdc_reg;
	u32			sdio_irq;

	struct mmc_request	*mrq;
	struct list_head	queue_node;

	unsigned int		clock;
	unsigned long		flags;
#define ATMCI_CARD_PRESENT	0
#define ATMCI_CARD_NEED_INIT	1
#define ATMCI_SHUTDOWN		2

	int			detect_pin;
	int			wp_pin;
	bool			detect_is_active_high;

	struct timer_list	detect_timer;
};

#define atmci_test_and_clear_pending(host, event)		\
	test_and_clear_bit(event, &host->pending_events)
#define atmci_set_completed(host, event)			\
	set_bit(event, &host->completed_events)
#define atmci_set_pending(host, event)				\
	set_bit(event, &host->pending_events)
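/*
 * Typical event flow (a sketch based on the ordering rules documented above,
 * not a verbatim excerpt): the interrupt handler snapshots SR, flags an
 * event and kicks the tasklet, e.g.
 *
 *	host->cmd_status = status;
 *	smp_wmb();
 *	atmci_set_pending(host, EVENT_CMD_RDY);
 *	tasklet_schedule(&host->tasklet);
 *
 * and the tasklet later consumes the event with
 * atmci_test_and_clear_pending(host, EVENT_CMD_RDY).
 */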

/*
 * The debugfs stuff below is mostly optimized away when
 * CONFIG_DEBUG_FS is not set.
 */
static int atmci_req_show(struct seq_file *s, void *v)
{
	struct atmel_mci_slot	*slot = s->private;
	struct mmc_request	*mrq;
	struct mmc_command	*cmd;
	struct mmc_command	*stop;
	struct mmc_data		*data;

	/* Make sure we get a consistent snapshot */
	spin_lock_bh(&slot->host->lock);
	mrq = slot->mrq;

	if (mrq) {
		cmd = mrq->cmd;
		data = mrq->data;
		stop = mrq->stop;

		if (cmd)
			seq_printf(s,
				"CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				cmd->opcode, cmd->arg, cmd->flags,
				cmd->resp[0], cmd->resp[1], cmd->resp[2],
				cmd->resp[3], cmd->error);
		if (data)
			seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
				data->bytes_xfered, data->blocks,
				data->blksz, data->flags, data->error);
		if (stop)
			seq_printf(s,
				"CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				stop->opcode, stop->arg, stop->flags,
				stop->resp[0], stop->resp[1], stop->resp[2],
				stop->resp[3], stop->error);
	}

	spin_unlock_bh(&slot->host->lock);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(atmci_req);

static void atmci_show_status_reg(struct seq_file *s,
		const char *regname, u32 value)
{
	static const char	*sr_bit[] = {
		[0]	= "CMDRDY",
		[1]	= "RXRDY",
		[2]	= "TXRDY",
		[3]	= "BLKE",
		[4]	= "DTIP",
		[5]	= "NOTBUSY",
		[6]	= "ENDRX",
		[7]	= "ENDTX",
		[8]	= "SDIOIRQA",
		[9]	= "SDIOIRQB",
		[12]	= "SDIOWAIT",
		[14]	= "RXBUFF",
		[15]	= "TXBUFE",
		[16]	= "RINDE",
		[17]	= "RDIRE",
		[18]	= "RCRCE",
		[19]	= "RENDE",
		[20]	= "RTOE",
		[21]	= "DCRCE",
		[22]	= "DTOE",
		[23]	= "CSTOE",
		[24]	= "BLKOVRE",
		[25]	= "DMADONE",
		[26]	= "FIFOEMPTY",
		[27]	= "XFRDONE",
		[30]	= "OVRE",
		[31]	= "UNRE",
	};
	unsigned int		i;

	seq_printf(s, "%s:\t0x%08x", regname, value);
	for (i = 0; i < ARRAY_SIZE(sr_bit); i++) {
		if (value & (1 << i)) {
			if (sr_bit[i])
				seq_printf(s, " %s", sr_bit[i]);
			else
				seq_puts(s, " UNKNOWN");
		}
	}
	seq_putc(s, '\n');
}

static int atmci_regs_show(struct seq_file *s, void *v)
{
	struct atmel_mci	*host = s->private;
	u32			*buf;
	int			ret = 0;

	buf = kmalloc(ATMCI_REGS_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pm_runtime_get_sync(&host->pdev->dev);

	/*
	 * Grab a more or less consistent snapshot. Note that we're
	 * not disabling interrupts, so IMR and SR may not be
	 * consistent.
	 */
	spin_lock_bh(&host->lock);
	memcpy_fromio(buf, host->regs, ATMCI_REGS_SIZE);
	spin_unlock_bh(&host->lock);

	pm_runtime_mark_last_busy(&host->pdev->dev);
	pm_runtime_put_autosuspend(&host->pdev->dev);

	seq_printf(s, "MR:\t0x%08x%s%s ",
		buf[ATMCI_MR / 4],
		buf[ATMCI_MR / 4] & ATMCI_MR_RDPROOF ? " RDPROOF" : "",
		buf[ATMCI_MR / 4] & ATMCI_MR_WRPROOF ? " WRPROOF" : "");
	if (host->caps.has_odd_clk_div)
		seq_printf(s, "{CLKDIV,CLKODD}=%u\n",
			((buf[ATMCI_MR / 4] & 0xff) << 1)
			| ((buf[ATMCI_MR / 4] >> 16) & 1));
	else
		seq_printf(s, "CLKDIV=%u\n",
			(buf[ATMCI_MR / 4] & 0xff));
	seq_printf(s, "DTOR:\t0x%08x\n", buf[ATMCI_DTOR / 4]);
	seq_printf(s, "SDCR:\t0x%08x\n", buf[ATMCI_SDCR / 4]);
	seq_printf(s, "ARGR:\t0x%08x\n", buf[ATMCI_ARGR / 4]);
	seq_printf(s, "BLKR:\t0x%08x BCNT=%u BLKLEN=%u\n",
		buf[ATMCI_BLKR / 4],
		buf[ATMCI_BLKR / 4] & 0xffff,
		(buf[ATMCI_BLKR / 4] >> 16) & 0xffff);
	if (host->caps.has_cstor_reg)
		seq_printf(s, "CSTOR:\t0x%08x\n", buf[ATMCI_CSTOR / 4]);

	/* Don't read RSPR and RDR; it will consume the data there */

	atmci_show_status_reg(s, "SR", buf[ATMCI_SR / 4]);
	atmci_show_status_reg(s, "IMR", buf[ATMCI_IMR / 4]);

	if (host->caps.has_dma_conf_reg) {
		u32 val;

		val = buf[ATMCI_DMA / 4];
		seq_printf(s, "DMA:\t0x%08x OFFSET=%u CHKSIZE=%u%s\n",
			val, val & 3,
			((val >> 4) & 3) ?
				1 << (((val >> 4) & 3) + 1) : 1,
			val & ATMCI_DMAEN ? " DMAEN" : "");
	}
	if (host->caps.has_cfg_reg) {
		u32 val;

		val = buf[ATMCI_CFG / 4];
		seq_printf(s, "CFG:\t0x%08x%s%s%s%s\n",
			val,
			val & ATMCI_CFG_FIFOMODE_1DATA ? " FIFOMODE_ONE_DATA" : "",
			val & ATMCI_CFG_FERRCTRL_COR ? " FERRCTRL_CLEAR_ON_READ" : "",
			val & ATMCI_CFG_HSMODE ? " HSMODE" : "",
			val & ATMCI_CFG_LSYNC ? " LSYNC" : "");
	}

	kfree(buf);

	return ret;
}

DEFINE_SHOW_ATTRIBUTE(atmci_regs);

static void atmci_init_debugfs(struct atmel_mci_slot *slot)
{
	struct mmc_host		*mmc = slot->mmc;
	struct atmel_mci	*host = slot->host;
	struct dentry		*root;

	root = mmc->debugfs_root;
	if (!root)
		return;

	debugfs_create_file("regs", S_IRUSR, root, host, &atmci_regs_fops);
	debugfs_create_file("req", S_IRUSR, root, slot, &atmci_req_fops);
	debugfs_create_u32("state", S_IRUSR, root, &host->state);
	debugfs_create_xul("pending_events", S_IRUSR, root,
			   &host->pending_events);
	debugfs_create_xul("completed_events", S_IRUSR, root,
			   &host->completed_events);
}
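/*
 * The files created above land under the slot's mmc_host debugfs directory,
 * typically /sys/kernel/debug/mmcN/ (e.g. mmcN/regs, mmcN/req, mmcN/state,
 * mmcN/pending_events, mmcN/completed_events).
 */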

#if defined(CONFIG_OF)
static const struct of_device_id atmci_dt_ids[] = {
	{ .compatible = "atmel,hsmci" },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, atmci_dt_ids);

static struct mci_platform_data*
atmci_of_init(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct device_node *cnp;
	struct mci_platform_data *pdata;
	u32 slot_id;

	if (!np) {
		dev_err(&pdev->dev, "device node not found\n");
		return ERR_PTR(-EINVAL);
	}

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	for_each_child_of_node(np, cnp) {
		if (of_property_read_u32(cnp, "reg", &slot_id)) {
			dev_warn(&pdev->dev, "reg property is missing for %pOF\n",
				 cnp);
			continue;
		}

		if (slot_id >= ATMCI_MAX_NR_SLOTS) {
			dev_warn(&pdev->dev, "can't have more than %d slots\n",
				 ATMCI_MAX_NR_SLOTS);
			of_node_put(cnp);
			break;
		}

		if (of_property_read_u32(cnp, "bus-width",
					 &pdata->slot[slot_id].bus_width))
			pdata->slot[slot_id].bus_width = 1;

		pdata->slot[slot_id].detect_pin =
			of_get_named_gpio(cnp, "cd-gpios", 0);

		pdata->slot[slot_id].detect_is_active_high =
			of_property_read_bool(cnp, "cd-inverted");

		pdata->slot[slot_id].non_removable =
			of_property_read_bool(cnp, "non-removable");

		pdata->slot[slot_id].wp_pin =
			of_get_named_gpio(cnp, "wp-gpios", 0);
	}

	return pdata;
}
#else /* CONFIG_OF */
static inline struct mci_platform_data*
atmci_of_init(struct platform_device *dev)
{
	return ERR_PTR(-EINVAL);
}
#endif
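/*
 * A minimal device tree fragment matched by atmci_of_init() might look like
 * this (illustrative only; addresses and GPIO specifiers are made up, and
 * the binding documentation remains authoritative):
 *
 *	mmc@f0008000 {
 *		compatible = "atmel,hsmci";
 *		slot@0 {
 *			reg = <0>;
 *			bus-width = <4>;
 *			cd-gpios = <&pioD 15 GPIO_ACTIVE_LOW>;
 *		};
 *	};
 *
 * Optional per-slot properties parsed above: "cd-inverted",
 * "non-removable" and "wp-gpios".
 */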

static inline unsigned int atmci_get_version(struct atmel_mci *host)
{
	return atmci_readl(host, ATMCI_VERSION) & 0x00000fff;
}

/*
 * Fix sconfig's burst size according to the Atmel MCI. We need to convert
 * them as: 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
 * With version 0x600 and later, we need to convert them as: 1 -> 0, 2 -> 1,
 * 4 -> 2, 8 -> 3, 16 -> 4.
 *
 * This can be done by finding the most significant bit set.
 */
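/*
 * For example, maxburst = 8 gives fls(8) = 4, so atmci_convert_chksize()
 * returns 2 on older IP revisions (offset 2) and 3 on version 0x600 and
 * later (offset 1), matching the mapping above.
 */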
static inline unsigned int atmci_convert_chksize(struct atmel_mci *host,
						 unsigned int maxburst)
{
	unsigned int version = atmci_get_version(host);
	unsigned int offset = 2;

	if (version >= 0x600)
		offset = 1;

	if (maxburst > 1)
		return fls(maxburst) - offset;
	else
		return 0;
}

static void atmci_timeout_timer(struct timer_list *t)
{
	struct atmel_mci *host;

	host = from_timer(host, t, timer);

	dev_dbg(&host->pdev->dev, "software timeout\n");

	if (host->mrq->cmd->data) {
		host->mrq->cmd->data->error = -ETIMEDOUT;
		host->data = NULL;
		/*
		 * With some SDIO modules, the DMA transfer sometimes hangs.
		 * If stop_transfer() is not called, the DMA request is not
		 * removed and subsequent requests are queued and never
		 * processed.
		 */
		if (host->state == STATE_DATA_XFER)
			host->stop_transfer(host);
	} else {
		host->mrq->cmd->error = -ETIMEDOUT;
		host->cmd = NULL;
	}
	host->need_reset = 1;
	host->state = STATE_END_REQUEST;
	smp_wmb();
	tasklet_schedule(&host->tasklet);
}

static inline unsigned int atmci_ns_to_clocks(struct atmel_mci *host,
					unsigned int ns)
{
	/*
	 * It is easier here to use us instead of ns for the timeout;
	 * it prevents overflows during the calculation.
	 */
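	/*
	 * For example (illustrative numbers): with bus_hz = 100 MHz, a
	 * 300000 ns timeout becomes 300 us, and each microsecond maps to
	 * DIV_ROUND_UP(100000000, 2000000) = 50 clocks, i.e. 15000 clocks.
	 */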
	unsigned int us = DIV_ROUND_UP(ns, 1000);

	/* Maximum clock frequency is host->bus_hz/2 */
	return us * (DIV_ROUND_UP(host->bus_hz, 2000000));
}

static void atmci_set_timeout(struct atmel_mci *host,
		struct atmel_mci_slot *slot, struct mmc_data *data)
{
	static unsigned	dtomul_to_shift[] = {
		0, 4, 7, 8, 10, 12, 16, 20
	};
	unsigned	timeout;
	unsigned	dtocyc;
	unsigned	dtomul;

	timeout = atmci_ns_to_clocks(host, data->timeout_ns)
		+ data->timeout_clks;

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) for (dtomul = 0; dtomul < 8; dtomul++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) unsigned shift = dtomul_to_shift[dtomul];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) dtocyc = (timeout + (1 << shift) - 1) >> shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) if (dtocyc < 15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) if (dtomul >= 8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) dtomul = 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) dtocyc = 15;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) dev_vdbg(&slot->mmc->class_dev, "setting timeout to %u cycles\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) dtocyc << dtomul_to_shift[dtomul]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) atmci_writel(host, ATMCI_DTOR, (ATMCI_DTOMUL(dtomul) | ATMCI_DTOCYC(dtocyc)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) }
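/*
 * Continuing the illustrative 6600-cycle figure above, the loop picks the
 * smallest multiplier whose cycle count fits the DTOCYC field (dtocyc < 15):
 *   x1: 6600   x16: 413   x128: 52   x256: 26   x1024: 7  -> chosen
 * so DTOR is programmed with DTOMUL = 4 and DTOCYC = 7, i.e. an effective
 * timeout of 7 * 1024 = 7168 cycles, slightly above the 6600 requested.
 */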
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) * Return mask with command flags to be enabled for this command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) static u32 atmci_prepare_command(struct mmc_host *mmc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) struct mmc_command *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) struct mmc_data *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) u32 cmdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) cmd->error = -EINPROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) cmdr = ATMCI_CMDR_CMDNB(cmd->opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) if (cmd->flags & MMC_RSP_PRESENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) if (cmd->flags & MMC_RSP_136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) cmdr |= ATMCI_CMDR_RSPTYP_136BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) cmdr |= ATMCI_CMDR_RSPTYP_48BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) * This should really be MAXLAT_5 for CMD2 and ACMD41, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) * it's too difficult to determine whether this is an ACMD or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) * not. Better make it 64.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) cmdr |= ATMCI_CMDR_MAXLAT_64CYC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) if (mmc->ios.bus_mode == MMC_BUSMODE_OPENDRAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) cmdr |= ATMCI_CMDR_OPDCMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) data = cmd->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) if (data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) cmdr |= ATMCI_CMDR_START_XFER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) if (cmd->opcode == SD_IO_RW_EXTENDED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) cmdr |= ATMCI_CMDR_SDIO_BLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) if (data->blocks > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) cmdr |= ATMCI_CMDR_MULTI_BLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) cmdr |= ATMCI_CMDR_BLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) if (data->flags & MMC_DATA_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) cmdr |= ATMCI_CMDR_TRDIR_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) return cmdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) }
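/*
 * Illustrative example: a single-block read (CMD17, READ_SINGLE_BLOCK) with
 * a 48-bit response on a push-pull bus ends up with
 *   ATMCI_CMDR_CMDNB(17) | ATMCI_CMDR_RSPTYP_48BIT | ATMCI_CMDR_MAXLAT_64CYC
 *   | ATMCI_CMDR_START_XFER | ATMCI_CMDR_BLOCK | ATMCI_CMDR_TRDIR_READ
 * while a data-less command such as CMD13 (SEND_STATUS) only gets CMDNB,
 * RSPTYP_48BIT and MAXLAT_64CYC.
 */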
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) static void atmci_send_command(struct atmel_mci *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) struct mmc_command *cmd, u32 cmd_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) unsigned int timeout_ms = cmd->busy_timeout ? cmd->busy_timeout :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) ATMCI_CMD_TIMEOUT_MS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) WARN_ON(host->cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) host->cmd = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) dev_vdbg(&host->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) "start command: ARGR=0x%08x CMDR=0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) cmd->arg, cmd_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) atmci_writel(host, ATMCI_ARGR, cmd->arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) atmci_writel(host, ATMCI_CMDR, cmd_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) mod_timer(&host->timer, jiffies + msecs_to_jiffies(timeout_ms));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) }
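/*
 * The mod_timer() above arms the software timeout handler earlier in this
 * file: if the command or data transfer never completes, the request is
 * failed with -ETIMEDOUT, the controller is flagged for a reset and the
 * tasklet is scheduled to end the request.
 */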
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) static void atmci_send_stop_cmd(struct atmel_mci *host, struct mmc_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) dev_dbg(&host->pdev->dev, "send stop command\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) atmci_send_command(host, data->stop, host->stop_cmdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) * Configure the given PDC buffer, taking care of alignment issues.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) * Update host->data_size and host->sg.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) static void atmci_pdc_set_single_buf(struct atmel_mci *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) enum atmci_xfer_dir dir, enum atmci_pdc_buf buf_nb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) u32 pointer_reg, counter_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) unsigned int buf_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) if (dir == XFER_RECEIVE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) pointer_reg = ATMEL_PDC_RPR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) counter_reg = ATMEL_PDC_RCR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) pointer_reg = ATMEL_PDC_TPR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) counter_reg = ATMEL_PDC_TCR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) if (buf_nb == PDC_SECOND_BUF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) pointer_reg += ATMEL_PDC_SCND_BUF_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) counter_reg += ATMEL_PDC_SCND_BUF_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) if (!host->caps.has_rwproof) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) buf_size = host->buf_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) atmci_writel(host, pointer_reg, host->buf_phys_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) buf_size = sg_dma_len(host->sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) atmci_writel(host, pointer_reg, sg_dma_address(host->sg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) if (host->data_size <= buf_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) if (host->data_size & 0x3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) /* If the size is not a multiple of 4, transfer bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) atmci_writel(host, counter_reg, host->data_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) atmci_writel(host, ATMCI_MR, host->mode_reg | ATMCI_MR_PDCFBYTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) /* Otherwise, transfer 32-bit words */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) atmci_writel(host, counter_reg, host->data_size / 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) host->data_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) /* We assume the size of a page is a multiple of 4 bytes (32-bit aligned) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) atmci_writel(host, counter_reg, sg_dma_len(host->sg) / 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) host->data_size -= sg_dma_len(host->sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) if (host->data_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) host->sg = sg_next(host->sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) * Configure the PDC buffers according to the data size, i.e. configure one or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) * two buffers. Don't use this function if you want to configure only the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) * second buffer; in that case, use atmci_pdc_set_single_buf() directly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) static void atmci_pdc_set_both_buf(struct atmel_mci *host, int dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) atmci_pdc_set_single_buf(host, dir, PDC_FIRST_BUF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) if (host->data_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) atmci_pdc_set_single_buf(host, dir, PDC_SECOND_BUF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) }
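/*
 * Illustrative example (hypothetical scatterlist): a 4 KiB read split into
 * four 1 KiB entries on a controller with RWPROOF. The first call programs
 * ATMEL_PDC_RPR/RCR with the first entry (1024 / 4 = 256 words), the second
 * call adds ATMEL_PDC_SCND_BUF_OFF and programs the next-buffer registers
 * with the second entry; the remaining entries are expected to be queued
 * later, one buffer at a time, as the PDC buffer interrupts arrive.
 */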
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) * Unmap sg lists, called when transfer is finished.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) static void atmci_pdc_cleanup(struct atmel_mci *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) struct mmc_data *data = host->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) if (data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) dma_unmap_sg(&host->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) data->sg, data->sg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) mmc_get_dma_dir(data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) * Disable PDC transfers. Update the pending flags to EVENT_XFER_COMPLETE after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) * the ATMCI_TXBUFE or ATMCI_RXBUFF interrupt has been received. Enable the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) * ATMCI_NOTBUSY interrupt, which is needed for both transfer directions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) static void atmci_pdc_complete(struct atmel_mci *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) int transfer_size = host->data->blocks * host->data->blksz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) if ((!host->caps.has_rwproof)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) && (host->data->flags & MMC_DATA_READ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) if (host->caps.has_bad_data_ordering)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) for (i = 0; i < transfer_size; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) host->buffer[i] = swab32(host->buffer[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) sg_copy_from_buffer(host->data->sg, host->data->sg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) host->buffer, transfer_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) atmci_pdc_cleanup(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) dev_dbg(&host->pdev->dev, "(%s) set pending xfer complete\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) atmci_set_pending(host, EVENT_XFER_COMPLETE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) tasklet_schedule(&host->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) static void atmci_dma_cleanup(struct atmel_mci *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) struct mmc_data *data = host->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) if (data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) dma_unmap_sg(host->dma.chan->device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) data->sg, data->sg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) mmc_get_dma_dir(data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) * This function is called by the DMA driver from tasklet context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) static void atmci_dma_complete(void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) struct atmel_mci *host = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) struct mmc_data *data = host->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) dev_vdbg(&host->pdev->dev, "DMA complete\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) if (host->caps.has_dma_conf_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) /* Disable DMA hardware handshaking on MCI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) atmci_writel(host, ATMCI_DMA, atmci_readl(host, ATMCI_DMA) & ~ATMCI_DMAEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) atmci_dma_cleanup(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) * If the card was removed, data will be NULL. No point trying
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) * to send the stop command or waiting for NOTBUSY in this case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) if (data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) dev_dbg(&host->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) "(%s) set pending xfer complete\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) atmci_set_pending(host, EVENT_XFER_COMPLETE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) tasklet_schedule(&host->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) * Regardless of what the documentation says, we have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) * to wait for NOTBUSY even after block read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) * operations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) * When the DMA transfer is complete, the controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) * may still be reading the CRC from the card, i.e.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) * the data transfer is still in progress and we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) * haven't seen all the potential error bits yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) * The interrupt handler will schedule a different
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) * tasklet to finish things up when the data transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) * is completely done.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) * We may not complete the mmc request here anyway
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) * because the mmc layer may call back and cause us to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) * violate the "don't submit new operations from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) * completion callback" rule of the dma engine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) * framework.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) * Returns a mask of interrupt flags to be enabled after the whole
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) * request has been prepared.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) static u32 atmci_prepare_data(struct atmel_mci *host, struct mmc_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) u32 iflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) data->error = -EINPROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) host->sg = data->sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) host->sg_len = data->sg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) host->data = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) host->data_chan = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) iflags = ATMCI_DATA_ERROR_FLAGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) * Errata: MMC data write operation with less than 12
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) * bytes is impossible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) * Errata: MCI Transmit Data Register (TDR) FIFO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) * corruption when length is not multiple of 4.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) if (data->blocks * data->blksz < 12
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) || (data->blocks * data->blksz) & 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) host->need_reset = true;
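/*
 * For example, an 8-byte write (8 < 12) or a 30-byte transfer (not a
 * multiple of 4) flags the controller for a reset before the next
 * request, while a regular 512-byte block is unaffected.
 */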
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) host->pio_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) if (data->flags & MMC_DATA_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) iflags |= ATMCI_RXRDY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) iflags |= ATMCI_TXRDY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) return iflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) * Set the interrupt flags and write the block length into the MCI mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) * register, even though this value is also accessible in the MCI block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) * register. It seems to be necessary on versions preceding the High Speed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) * MCI. It also maps the scatterlist and configures the PDC registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) static u32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) u32 iflags, tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) data->error = -EINPROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) host->data = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) host->sg = data->sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) iflags = ATMCI_DATA_ERROR_FLAGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) /* Enable PDC mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) atmci_writel(host, ATMCI_MR, host->mode_reg | ATMCI_MR_PDCMODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) if (data->flags & MMC_DATA_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) iflags |= ATMCI_ENDRX | ATMCI_RXBUFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) iflags |= ATMCI_ENDTX | ATMCI_TXBUFE | ATMCI_BLKE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) /* Set BLKLEN */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) tmp = atmci_readl(host, ATMCI_MR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) tmp &= 0x0000ffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) tmp |= ATMCI_BLKLEN(data->blksz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) atmci_writel(host, ATMCI_MR, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) /* Configure PDC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) host->data_size = data->blocks * data->blksz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) dma_map_sg(&host->pdev->dev, data->sg, data->sg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) mmc_get_dma_dir(data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) if ((!host->caps.has_rwproof)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) && (host->data->flags & MMC_DATA_WRITE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) sg_copy_to_buffer(host->data->sg, host->data->sg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) host->buffer, host->data_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) if (host->caps.has_bad_data_ordering)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) for (i = 0; i < host->data_size; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) host->buffer[i] = swab32(host->buffer[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) if (host->data_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) atmci_pdc_set_both_buf(host, data->flags & MMC_DATA_READ ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) XFER_RECEIVE : XFER_TRANSMIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) return iflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) static u32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) struct dma_chan *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) struct dma_async_tx_descriptor *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) enum dma_transfer_direction slave_dirn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) unsigned int sglen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) u32 maxburst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) u32 iflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) data->error = -EINPROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) WARN_ON(host->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) host->sg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) host->data = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) iflags = ATMCI_DATA_ERROR_FLAGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) * We don't do DMA on "complex" transfers, i.e. with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) * non-word-aligned buffers or lengths. Also, we don't bother
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) * with all the DMA setup overhead for short transfers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) if (data->blocks * data->blksz < ATMCI_DMA_THRESHOLD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) return atmci_prepare_data(host, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) if (data->blksz & 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) return atmci_prepare_data(host, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) for_each_sg(data->sg, sg, data->sg_len, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) if (sg->offset & 3 || sg->length & 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) return atmci_prepare_data(host, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) /* If we don't have a channel, we can't do DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) chan = host->dma.chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) if (chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) host->data_chan = chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) if (!chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) if (data->flags & MMC_DATA_READ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) host->dma_conf.direction = slave_dirn = DMA_DEV_TO_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) maxburst = atmci_convert_chksize(host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) host->dma_conf.src_maxburst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) host->dma_conf.direction = slave_dirn = DMA_MEM_TO_DEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) maxburst = atmci_convert_chksize(host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) host->dma_conf.dst_maxburst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) if (host->caps.has_dma_conf_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(maxburst) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) ATMCI_DMAEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) sglen = dma_map_sg(chan->device->dev, data->sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) data->sg_len, mmc_get_dma_dir(data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) dmaengine_slave_config(chan, &host->dma_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) desc = dmaengine_prep_slave_sg(chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) data->sg, sglen, slave_dirn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) if (!desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) goto unmap_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) host->dma.data_desc = desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) desc->callback = atmci_dma_complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) desc->callback_param = host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) return iflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) unmap_exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) dma_unmap_sg(chan->device->dev, data->sg, data->sg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) mmc_get_dma_dir(data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) }
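/*
 * Summary of the fallback path above: transfers shorter than
 * ATMCI_DMA_THRESHOLD, or with a non-word-aligned buffer offset or length,
 * skip the dmaengine setup and go through atmci_prepare_data() instead,
 * i.e. they are moved by PIO from the RXRDY/TXRDY interrupt path.
 */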
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)
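/*
 * PIO variant: when no DMA controller or PDC is used, data is moved word by
 * word from the interrupt handler, so there is nothing to start here, which
 * is presumably why the body below is empty.
 */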
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) atmci_submit_data(struct atmel_mci *host, struct mmc_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) * Start PDC according to transfer direction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) atmci_submit_data_pdc(struct atmel_mci *host, struct mmc_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) if (data->flags & MMC_DATA_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) struct dma_chan *chan = host->data_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) struct dma_async_tx_descriptor *desc = host->dma.data_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) if (chan) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) dmaengine_submit(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) dma_async_issue_pending(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) static void atmci_stop_transfer(struct atmel_mci *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) dev_dbg(&host->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) "(%s) set pending xfer complete\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) atmci_set_pending(host, EVENT_XFER_COMPLETE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) * Stop data transfer because error(s) occurred.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) static void atmci_stop_transfer_pdc(struct atmel_mci *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) static void atmci_stop_transfer_dma(struct atmel_mci *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) struct dma_chan *chan = host->data_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) if (chan) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) dmaengine_terminate_all(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) atmci_dma_cleanup(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) /* Data transfer was stopped by the interrupt handler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) dev_dbg(&host->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) "(%s) set pending xfer complete\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) atmci_set_pending(host, EVENT_XFER_COMPLETE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) * Start a request: prepare data if needed, prepare the command and activate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) * interrupts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) static void atmci_start_request(struct atmel_mci *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) struct atmel_mci_slot *slot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) struct mmc_request *mrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) struct mmc_command *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) struct mmc_data *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) u32 iflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) u32 cmdflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) mrq = slot->mrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) host->cur_slot = slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) host->mrq = mrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) host->pending_events = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) host->completed_events = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) host->cmd_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) host->data_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) dev_dbg(&host->pdev->dev, "start request: cmd %u\n", mrq->cmd->opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) if (host->need_reset || host->caps.need_reset_after_xfer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) iflags = atmci_readl(host, ATMCI_IMR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) iflags &= (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) atmci_writel(host, ATMCI_MR, host->mode_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) if (host->caps.has_cfg_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) atmci_writel(host, ATMCI_CFG, host->cfg_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) atmci_writel(host, ATMCI_IER, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) host->need_reset = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) atmci_writel(host, ATMCI_SDCR, slot->sdc_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) iflags = atmci_readl(host, ATMCI_IMR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) if (iflags & ~(ATMCI_SDIOIRQA | ATMCI_SDIOIRQB))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) dev_dbg(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) if (unlikely(test_and_clear_bit(ATMCI_CARD_NEED_INIT, &slot->flags))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) /* Send init sequence (74 clock cycles) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) atmci_writel(host, ATMCI_CMDR, ATMCI_CMDR_SPCMD_INIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) while (!(atmci_readl(host, ATMCI_SR) & ATMCI_CMDRDY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) iflags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) data = mrq->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) if (data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) atmci_set_timeout(host, slot, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) /* Must set block count/size before sending command */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) atmci_writel(host, ATMCI_BLKR, ATMCI_BCNT(data->blocks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) | ATMCI_BLKLEN(data->blksz));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) dev_vdbg(&slot->mmc->class_dev, "BLKR=0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) ATMCI_BCNT(data->blocks) | ATMCI_BLKLEN(data->blksz));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) iflags |= host->prepare_data(host, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) iflags |= ATMCI_CMDRDY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) cmd = mrq->cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) cmdflags = atmci_prepare_command(slot->mmc, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) * The DMA transfer should be started before sending the command, to avoid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) * unexpected errors, especially for read operations in SDIO mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) * Unfortunately, in PDC mode, the command has to be sent before starting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) * the transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) if (host->submit_data != &atmci_submit_data_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) atmci_send_command(host, cmd, cmdflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) if (data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) host->submit_data(host, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) if (host->submit_data == &atmci_submit_data_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) atmci_send_command(host, cmd, cmdflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) if (mrq->stop) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) host->stop_cmdr |= ATMCI_CMDR_STOP_XFER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) if (!(data->flags & MMC_DATA_WRITE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) host->stop_cmdr |= ATMCI_CMDR_TRDIR_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) host->stop_cmdr |= ATMCI_CMDR_MULTI_BLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) * We could have enabled interrupts earlier, but I suspect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) * that would open up a nice can of interesting race
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) * conditions (e.g. command and data complete, but stop not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) * prepared yet.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) atmci_writel(host, ATMCI_IER, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) static void atmci_queue_request(struct atmel_mci *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) struct atmel_mci_slot *slot, struct mmc_request *mrq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) host->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) spin_lock_bh(&host->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) slot->mrq = mrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) if (host->state == STATE_IDLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) host->state = STATE_SENDING_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) atmci_start_request(host, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) dev_dbg(&host->pdev->dev, "queue request\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) list_add_tail(&slot->queue_node, &host->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) spin_unlock_bh(&host->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) struct atmel_mci_slot *slot = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) struct atmel_mci *host = slot->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) struct mmc_data *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) WARN_ON(slot->mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) dev_dbg(&host->pdev->dev, "MRQ: cmd %u\n", mrq->cmd->opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) * We may "know" the card is gone even though there's still an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) * electrical connection. If so, we really need to communicate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) * this to the MMC core since there won't be any more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) * interrupts as the card is completely removed. Otherwise,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) * the MMC core might believe the card is still there even
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) * though the card was just removed very slowly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) if (!test_bit(ATMCI_CARD_PRESENT, &slot->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) mrq->cmd->error = -ENOMEDIUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) mmc_request_done(mmc, mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) /* We don't support multiple blocks of weird lengths. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) data = mrq->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) if (data && data->blocks > 1 && data->blksz & 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) mrq->cmd->error = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) mmc_request_done(mmc, mrq);
/* The request has already been completed with an error; don't queue it. */
return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) atmci_queue_request(host, slot, mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) struct atmel_mci_slot *slot = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) struct atmel_mci *host = slot->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) slot->sdc_reg &= ~ATMCI_SDCBUS_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) switch (ios->bus_width) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) case MMC_BUS_WIDTH_1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) slot->sdc_reg |= ATMCI_SDCBUS_1BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) case MMC_BUS_WIDTH_4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) slot->sdc_reg |= ATMCI_SDCBUS_4BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) case MMC_BUS_WIDTH_8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) slot->sdc_reg |= ATMCI_SDCBUS_8BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) if (ios->clock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) unsigned int clock_min = ~0U;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) int clkdiv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) spin_lock_bh(&host->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) if (!host->mode_reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) if (host->caps.has_cfg_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) atmci_writel(host, ATMCI_CFG, host->cfg_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) * Use mirror of ios->clock to prevent race with mmc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) * core ios update when finding the minimum.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) slot->clock = ios->clock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) if (host->slot[i] && host->slot[i]->clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) && host->slot[i]->clock < clock_min)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) clock_min = host->slot[i]->clock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) /* Calculate clock divider */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) if (host->caps.has_odd_clk_div) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) clkdiv = DIV_ROUND_UP(host->bus_hz, clock_min) - 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) if (clkdiv < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) dev_warn(&mmc->class_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) "clock %u too fast; using %lu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) clock_min, host->bus_hz / 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) clkdiv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) } else if (clkdiv > 511) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) dev_warn(&mmc->class_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) "clock %u too slow; using %lu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) clock_min, host->bus_hz / (511 + 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) clkdiv = 511;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) host->mode_reg = ATMCI_MR_CLKDIV(clkdiv >> 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) | ATMCI_MR_CLKODD(clkdiv & 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) clkdiv = DIV_ROUND_UP(host->bus_hz, 2 * clock_min) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) if (clkdiv > 255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) dev_warn(&mmc->class_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) "clock %u too slow; using %lu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) clock_min, host->bus_hz / (2 * 256));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) clkdiv = 255;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) host->mode_reg = ATMCI_MR_CLKDIV(clkdiv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) }
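/*
 * Worked example with assumed figures (bus_hz = 132 MHz, clock_min =
 * 25 MHz): with odd clock divider support,
 *   clkdiv = DIV_ROUND_UP(132000000, 25000000) - 2 = 4,
 * giving 132 MHz / (4 + 2) = 22 MHz; without it,
 *   clkdiv = DIV_ROUND_UP(132000000, 50000000) - 1 = 2,
 * giving 132 MHz / (2 * (2 + 1)) = 22 MHz as well. Either way the result
 * stays at or below the requested rate.
 */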
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) * WRPROOF and RDPROOF prevent overruns/underruns by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) * stopping the clock when the FIFO is full/empty.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) * This state is not expected to last for long.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) if (host->caps.has_rwproof)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) host->mode_reg |= (ATMCI_MR_WRPROOF | ATMCI_MR_RDPROOF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) if (host->caps.has_cfg_reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) /* set up High Speed mode according to the card capability */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) if (ios->timing == MMC_TIMING_SD_HS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) host->cfg_reg |= ATMCI_CFG_HSMODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) host->cfg_reg &= ~ATMCI_CFG_HSMODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) if (list_empty(&host->queue)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) atmci_writel(host, ATMCI_MR, host->mode_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) if (host->caps.has_cfg_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) atmci_writel(host, ATMCI_CFG, host->cfg_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) host->need_clock_update = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) spin_unlock_bh(&host->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) bool any_slot_active = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) spin_lock_bh(&host->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) slot->clock = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) if (host->slot[i] && host->slot[i]->clock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) any_slot_active = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) if (!any_slot_active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) if (host->mode_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) atmci_readl(host, ATMCI_MR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) host->mode_reg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) spin_unlock_bh(&host->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) switch (ios->power_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) case MMC_POWER_OFF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) if (!IS_ERR(mmc->supply.vmmc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) case MMC_POWER_UP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) set_bit(ATMCI_CARD_NEED_INIT, &slot->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) if (!IS_ERR(mmc->supply.vmmc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) static int atmci_get_ro(struct mmc_host *mmc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) int read_only = -ENOSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) struct atmel_mci_slot *slot = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) if (gpio_is_valid(slot->wp_pin)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) read_only = gpio_get_value(slot->wp_pin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) dev_dbg(&mmc->class_dev, "card is %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) read_only ? "read-only" : "read-write");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) return read_only;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) static int atmci_get_cd(struct mmc_host *mmc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) int present = -ENOSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) struct atmel_mci_slot *slot = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) if (gpio_is_valid(slot->detect_pin)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) present = !(gpio_get_value(slot->detect_pin) ^
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) slot->detect_is_active_high);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) dev_dbg(&mmc->class_dev, "card is %spresent\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) present ? "" : "not ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) return present;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) static void atmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) struct atmel_mci_slot *slot = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) struct atmel_mci *host = slot->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) if (enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) atmci_writel(host, ATMCI_IER, slot->sdio_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) atmci_writel(host, ATMCI_IDR, slot->sdio_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) static const struct mmc_host_ops atmci_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) .request = atmci_request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) .set_ios = atmci_set_ios,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) .get_ro = atmci_get_ro,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) .get_cd = atmci_get_cd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) .enable_sdio_irq = atmci_enable_sdio_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) /* Called with host->lock held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) __releases(&host->lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) __acquires(&host->lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) struct atmel_mci_slot *slot = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) struct mmc_host *prev_mmc = host->cur_slot->mmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) WARN_ON(host->cmd || host->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
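/*
 * The request finished normally, so cancel host->timer (presumably the
 * software timeout armed when the request was started).
 */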
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) del_timer(&host->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567)
/*
* Update the MMC clock rate if necessary. This may be
* needed if set_ios() is called while a different slot is
* busy transferring data.
*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) if (host->need_clock_update) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) atmci_writel(host, ATMCI_MR, host->mode_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) if (host->caps.has_cfg_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) atmci_writel(host, ATMCI_CFG, host->cfg_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) host->cur_slot->mrq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) host->mrq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) if (!list_empty(&host->queue)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) slot = list_entry(host->queue.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) struct atmel_mci_slot, queue_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) list_del(&slot->queue_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) dev_vdbg(&host->pdev->dev, "list not empty: %s is next\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) mmc_hostname(slot->mmc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) host->state = STATE_SENDING_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) atmci_start_request(host, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) dev_vdbg(&host->pdev->dev, "list empty\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) host->state = STATE_IDLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593)
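/*
 * Drop the lock while calling back into the MMC core:
 * mmc_request_done() may immediately submit a new request (e.g. a
 * retry) and re-enter this driver.
 */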
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) spin_unlock(&host->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) mmc_request_done(prev_mmc, mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) spin_lock(&host->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) static void atmci_command_complete(struct atmel_mci *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) struct mmc_command *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) u32 status = host->cmd_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) /* Read the response from the card (up to 16 bytes) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) cmd->resp[0] = atmci_readl(host, ATMCI_RSPR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) cmd->resp[1] = atmci_readl(host, ATMCI_RSPR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) cmd->resp[2] = atmci_readl(host, ATMCI_RSPR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) cmd->resp[3] = atmci_readl(host, ATMCI_RSPR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) if (status & ATMCI_RTOE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) cmd->error = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) else if ((cmd->flags & MMC_RSP_CRC) && (status & ATMCI_RCRCE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) cmd->error = -EILSEQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) else if (status & (ATMCI_RINDE | ATMCI_RDIRE | ATMCI_RENDE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) cmd->error = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) else if (host->mrq->data && (host->mrq->data->blksz & 3)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) if (host->caps.need_blksz_mul_4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) cmd->error = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) host->need_reset = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) cmd->error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) static void atmci_detect_change(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) struct atmel_mci_slot *slot = from_timer(slot, t, detect_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) bool present;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) bool present_old;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) * atmci_cleanup_slot() sets the ATMCI_SHUTDOWN flag before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) * freeing the interrupt. We must not re-enable the interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) * if it has been freed, and if we're shutting down, it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) * doesn't really matter whether the card is present or not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) smp_rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) if (test_bit(ATMCI_SHUTDOWN, &slot->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640)
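/*
 * Re-enable the card-detect IRQ that atmci_detect_interrupt() disabled
 * for debouncing, then sample the pin.
 */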
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) enable_irq(gpio_to_irq(slot->detect_pin));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) present = !(gpio_get_value(slot->detect_pin) ^
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) slot->detect_is_active_high);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) present_old = test_bit(ATMCI_CARD_PRESENT, &slot->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) dev_vdbg(&slot->mmc->class_dev, "detect change: %d (was %d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) present, present_old);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) if (present != present_old) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) struct atmel_mci *host = slot->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) struct mmc_request *mrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) dev_dbg(&slot->mmc->class_dev, "card %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) present ? "inserted" : "removed");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) spin_lock(&host->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) if (!present)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) clear_bit(ATMCI_CARD_PRESENT, &slot->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) set_bit(ATMCI_CARD_PRESENT, &slot->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) /* Clean up queue if present */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) mrq = slot->mrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) if (mrq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) if (mrq == host->mrq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) * Reset controller to terminate any ongoing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) * commands or data transfers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) atmci_writel(host, ATMCI_MR, host->mode_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) if (host->caps.has_cfg_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) atmci_writel(host, ATMCI_CFG, host->cfg_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) host->data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) host->cmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) switch (host->state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) case STATE_IDLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) case STATE_SENDING_CMD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) mrq->cmd->error = -ENOMEDIUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) if (mrq->data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) host->stop_transfer(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) case STATE_DATA_XFER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) mrq->data->error = -ENOMEDIUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) host->stop_transfer(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) case STATE_WAITING_NOTBUSY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) mrq->data->error = -ENOMEDIUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) case STATE_SENDING_STOP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) mrq->stop->error = -ENOMEDIUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) case STATE_END_REQUEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) atmci_request_end(host, mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) list_del(&slot->queue_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) mrq->cmd->error = -ENOMEDIUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) if (mrq->data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) mrq->data->error = -ENOMEDIUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) if (mrq->stop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) mrq->stop->error = -ENOMEDIUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) spin_unlock(&host->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) mmc_request_done(slot->mmc, mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) spin_lock(&host->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) spin_unlock(&host->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) mmc_detect_change(slot->mmc, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) static void atmci_tasklet_func(unsigned long priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) struct atmel_mci *host = (struct atmel_mci *)priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) struct mmc_request *mrq = host->mrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) struct mmc_data *data = host->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) enum atmel_mci_state state = host->state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) enum atmel_mci_state prev_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) u32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) spin_lock(&host->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) state = host->state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) dev_vdbg(&host->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) "tasklet: state %u pending/completed/mask %lx/%lx/%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) state, host->pending_events, host->completed_events,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) atmci_readl(host, ATMCI_IMR));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739)
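/*
 * Run the state machine until it settles: each pass consumes pending
 * events and may advance the state; stop once a full pass makes no
 * progress.
 */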
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) prev_state = state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) dev_dbg(&host->pdev->dev, "FSM: state=%d\n", state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) switch (state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) case STATE_IDLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) case STATE_SENDING_CMD:
/*
* The command has been sent; we are waiting for the
* command-ready event. Three next states are then possible:
* END_REQUEST by default, WAITING_NOTBUSY if the command
* requires it, or DATA_XFER if there is data.
*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) if (!atmci_test_and_clear_pending(host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) EVENT_CMD_RDY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) dev_dbg(&host->pdev->dev, "set completed cmd ready\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) host->cmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) atmci_set_completed(host, EVENT_CMD_RDY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) atmci_command_complete(host, mrq->cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) if (mrq->data) {
dev_dbg(&host->pdev->dev,
"command with data transfer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) * If there is a command error don't start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) * data transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) if (mrq->cmd->error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) host->stop_transfer(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) host->data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) atmci_writel(host, ATMCI_IDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) ATMCI_TXRDY | ATMCI_RXRDY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) | ATMCI_DATA_ERROR_FLAGS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) state = STATE_END_REQUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) state = STATE_DATA_XFER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) } else if ((!mrq->data) && (mrq->cmd->flags & MMC_RSP_BUSY)) {
dev_dbg(&host->pdev->dev,
"command response needs waiting for notbusy\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) state = STATE_WAITING_NOTBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) state = STATE_END_REQUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) case STATE_DATA_XFER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) if (atmci_test_and_clear_pending(host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) EVENT_DATA_ERROR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) dev_dbg(&host->pdev->dev, "set completed data error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) atmci_set_completed(host, EVENT_DATA_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) state = STATE_END_REQUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798)
/*
* A data transfer is in progress. The event expected
* to move to the next state depends on the data transfer
* type (PDC or DMA). Once the transfer is done, we can move
* to the next step, which is WAITING_NOTBUSY in the write
* case and directly SENDING_STOP in the read case.
*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) dev_dbg(&host->pdev->dev, "FSM: xfer complete?\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) if (!atmci_test_and_clear_pending(host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) EVENT_XFER_COMPLETE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) dev_dbg(&host->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) "(%s) set completed xfer complete\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) atmci_set_completed(host, EVENT_XFER_COMPLETE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815)
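/*
 * Pick the next step: writes (and controllers that also need it for
 * reads) must wait for not-busy; otherwise send the stop command if
 * there is one, or finish the request.
 */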
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) if (host->caps.need_notbusy_for_read_ops ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) (host->data->flags & MMC_DATA_WRITE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) state = STATE_WAITING_NOTBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) } else if (host->mrq->stop) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) atmci_send_stop_cmd(host, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) state = STATE_SENDING_STOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) host->data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) data->bytes_xfered = data->blocks * data->blksz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) data->error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) state = STATE_END_REQUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) case STATE_WAITING_NOTBUSY:
/*
* We can be in this state for two reasons: a command
* that requires waiting for the not-busy signal (stop
* command included) or a write operation. In the latter
* case, we need to send a stop command.
*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) dev_dbg(&host->pdev->dev, "FSM: not busy?\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) if (!atmci_test_and_clear_pending(host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) EVENT_NOTBUSY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) dev_dbg(&host->pdev->dev, "set completed not busy\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) atmci_set_completed(host, EVENT_NOTBUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) if (host->data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) * For some commands such as CMD53, even if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) * there is data transfer, there is no stop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) * command to send.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) if (host->mrq->stop) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) atmci_writel(host, ATMCI_IER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) ATMCI_CMDRDY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) atmci_send_stop_cmd(host, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) state = STATE_SENDING_STOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) host->data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) data->bytes_xfered = data->blocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) * data->blksz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) data->error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) state = STATE_END_REQUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) state = STATE_END_REQUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) case STATE_SENDING_STOP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) * In this state, it is important to set host->data to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) * NULL (which is tested in the waiting notbusy state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) * in order to go to the end request state instead of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) * sending stop again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) if (!atmci_test_and_clear_pending(host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) EVENT_CMD_RDY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) dev_dbg(&host->pdev->dev, "FSM: cmd ready\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) host->cmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) data->bytes_xfered = data->blocks * data->blksz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) data->error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) atmci_command_complete(host, mrq->stop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) if (mrq->stop->error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) host->stop_transfer(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) atmci_writel(host, ATMCI_IDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) ATMCI_TXRDY | ATMCI_RXRDY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) | ATMCI_DATA_ERROR_FLAGS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) state = STATE_END_REQUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) state = STATE_WAITING_NOTBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) host->data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) case STATE_END_REQUEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY | ATMCI_RXRDY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) | ATMCI_DATA_ERROR_FLAGS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) status = host->data_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) if (unlikely(status)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) host->stop_transfer(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) host->data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) if (data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) if (status & ATMCI_DTOE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) data->error = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) } else if (status & ATMCI_DCRCE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) data->error = -EILSEQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) data->error = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) atmci_request_end(host, host->mrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) goto unlock; /* atmci_request_end() sets host->state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) } while (state != prev_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) host->state = state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) spin_unlock(&host->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) static void atmci_read_data_pio(struct atmel_mci *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) struct scatterlist *sg = host->sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) unsigned int offset = host->pio_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) struct mmc_data *data = host->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) u32 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) u32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) unsigned int nbytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937)
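/*
 * Drain the RX FIFO one 32-bit word at a time into the scatterlist,
 * handling words that straddle an sg entry boundary, for as long as
 * RXRDY stays set.
 */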
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) value = atmci_readl(host, ATMCI_RDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) if (likely(offset + 4 <= sg->length)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) sg_pcopy_from_buffer(sg, 1, &value, sizeof(u32), offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) offset += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) nbytes += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) if (offset == sg->length) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) flush_dcache_page(sg_page(sg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) host->sg = sg = sg_next(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) host->sg_len--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) if (!sg || !host->sg_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) unsigned int remaining = sg->length - offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) sg_pcopy_from_buffer(sg, 1, &value, remaining, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) nbytes += remaining;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) flush_dcache_page(sg_page(sg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) host->sg = sg = sg_next(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) host->sg_len--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) if (!sg || !host->sg_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) offset = 4 - remaining;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) sg_pcopy_from_buffer(sg, 1, (u8 *)&value + remaining,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) offset, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) nbytes += offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) status = atmci_readl(host, ATMCI_SR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) if (status & ATMCI_DATA_ERROR_FLAGS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) atmci_writel(host, ATMCI_IDR, (ATMCI_NOTBUSY | ATMCI_RXRDY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) | ATMCI_DATA_ERROR_FLAGS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) host->data_status = status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) data->bytes_xfered += nbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) } while (status & ATMCI_RXRDY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) host->pio_offset = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) data->bytes_xfered += nbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) atmci_writel(host, ATMCI_IDR, ATMCI_RXRDY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) data->bytes_xfered += nbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) atmci_set_pending(host, EVENT_XFER_COMPLETE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) static void atmci_write_data_pio(struct atmel_mci *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) struct scatterlist *sg = host->sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) unsigned int offset = host->pio_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) struct mmc_data *data = host->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) u32 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) u32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) unsigned int nbytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004)
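/*
 * Feed the TX FIFO one 32-bit word at a time from the scatterlist,
 * handling words that straddle an sg entry boundary, for as long as
 * TXRDY stays set.
 */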
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) if (likely(offset + 4 <= sg->length)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) sg_pcopy_to_buffer(sg, 1, &value, sizeof(u32), offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) atmci_writel(host, ATMCI_TDR, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) offset += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) nbytes += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) if (offset == sg->length) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) host->sg = sg = sg_next(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) host->sg_len--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) if (!sg || !host->sg_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) unsigned int remaining = sg->length - offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) value = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) sg_pcopy_to_buffer(sg, 1, &value, remaining, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) nbytes += remaining;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) host->sg = sg = sg_next(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) host->sg_len--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) if (!sg || !host->sg_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) atmci_writel(host, ATMCI_TDR, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) offset = 4 - remaining;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) sg_pcopy_to_buffer(sg, 1, (u8 *)&value + remaining,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) offset, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) atmci_writel(host, ATMCI_TDR, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) nbytes += offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) status = atmci_readl(host, ATMCI_SR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) if (status & ATMCI_DATA_ERROR_FLAGS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) atmci_writel(host, ATMCI_IDR, (ATMCI_NOTBUSY | ATMCI_TXRDY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) | ATMCI_DATA_ERROR_FLAGS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) host->data_status = status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) data->bytes_xfered += nbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) } while (status & ATMCI_TXRDY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) host->pio_offset = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) data->bytes_xfered += nbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) data->bytes_xfered += nbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) atmci_set_pending(host, EVENT_XFER_COMPLETE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) static void atmci_sdio_interrupt(struct atmel_mci *host, u32 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) struct atmel_mci_slot *slot = host->slot[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) if (slot && (status & slot->sdio_irq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) mmc_signal_sdio_irq(slot->mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) static irqreturn_t atmci_interrupt(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) struct atmel_mci *host = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) u32 status, mask, pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) unsigned int pass_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082)
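/*
 * Bound the loop so a burst of interrupts is handled in one go without
 * risking livelock; pass_count is only ever 0 if nothing was pending on
 * the very first pass, in which case IRQ_NONE is returned below.
 */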
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) status = atmci_readl(host, ATMCI_SR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) mask = atmci_readl(host, ATMCI_IMR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) pending = status & mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) if (!pending)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) if (pending & ATMCI_DATA_ERROR_FLAGS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) dev_dbg(&host->pdev->dev, "IRQ: data error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) atmci_writel(host, ATMCI_IDR, ATMCI_DATA_ERROR_FLAGS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) | ATMCI_RXRDY | ATMCI_TXRDY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) | ATMCI_ENDRX | ATMCI_ENDTX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) | ATMCI_RXBUFF | ATMCI_TXBUFE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) host->data_status = status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) dev_dbg(&host->pdev->dev, "set pending data error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) atmci_set_pending(host, EVENT_DATA_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) tasklet_schedule(&host->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) if (pending & ATMCI_TXBUFE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) dev_dbg(&host->pdev->dev, "IRQ: tx buffer empty\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) atmci_writel(host, ATMCI_IDR, ATMCI_TXBUFE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
/*
* We can receive this interrupt before having configured
* the second PDC buffer, so we need to reconfigure the
* first and second buffers again.
*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) if (host->data_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) atmci_pdc_set_both_buf(host, XFER_TRANSMIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) atmci_writel(host, ATMCI_IER, ATMCI_ENDTX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) atmci_writel(host, ATMCI_IER, ATMCI_TXBUFE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) atmci_pdc_complete(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) } else if (pending & ATMCI_ENDTX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) dev_dbg(&host->pdev->dev, "IRQ: end of tx buffer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) if (host->data_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) atmci_pdc_set_single_buf(host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) XFER_TRANSMIT, PDC_SECOND_BUF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) atmci_writel(host, ATMCI_IER, ATMCI_ENDTX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) if (pending & ATMCI_RXBUFF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) dev_dbg(&host->pdev->dev, "IRQ: rx buffer full\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) atmci_writel(host, ATMCI_IDR, ATMCI_RXBUFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
/*
* We can receive this interrupt before having configured
* the second PDC buffer, so we need to reconfigure the
* first and second buffers again.
*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) if (host->data_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) atmci_pdc_set_both_buf(host, XFER_RECEIVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) atmci_writel(host, ATMCI_IER, ATMCI_ENDRX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) atmci_writel(host, ATMCI_IER, ATMCI_RXBUFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) atmci_pdc_complete(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) } else if (pending & ATMCI_ENDRX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) dev_dbg(&host->pdev->dev, "IRQ: end of rx buffer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) if (host->data_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) atmci_pdc_set_single_buf(host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) XFER_RECEIVE, PDC_SECOND_BUF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) atmci_writel(host, ATMCI_IER, ATMCI_ENDRX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157)
/*
* The first MCI IPs, mainly the ones using the PDC, have issues
* with the notbusy signal: it is not asserted after a data
* transmission unless a stop command has been sent. The
* appropriate workaround is to use the BLKE signal.
*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) if (pending & ATMCI_BLKE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) dev_dbg(&host->pdev->dev, "IRQ: blke\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) atmci_writel(host, ATMCI_IDR, ATMCI_BLKE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) dev_dbg(&host->pdev->dev, "set pending notbusy\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) atmci_set_pending(host, EVENT_NOTBUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) tasklet_schedule(&host->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) if (pending & ATMCI_NOTBUSY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) dev_dbg(&host->pdev->dev, "IRQ: not_busy\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) atmci_writel(host, ATMCI_IDR, ATMCI_NOTBUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) dev_dbg(&host->pdev->dev, "set pending notbusy\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) atmci_set_pending(host, EVENT_NOTBUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) tasklet_schedule(&host->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) if (pending & ATMCI_RXRDY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) atmci_read_data_pio(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) if (pending & ATMCI_TXRDY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) atmci_write_data_pio(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) if (pending & ATMCI_CMDRDY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) dev_dbg(&host->pdev->dev, "IRQ: cmd ready\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) atmci_writel(host, ATMCI_IDR, ATMCI_CMDRDY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) host->cmd_status = status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) dev_dbg(&host->pdev->dev, "set pending cmd rdy\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) atmci_set_pending(host, EVENT_CMD_RDY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) tasklet_schedule(&host->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) if (pending & (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) atmci_sdio_interrupt(host, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) } while (pass_count++ < 5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) return pass_count ? IRQ_HANDLED : IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) struct atmel_mci_slot *slot = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) * Disable interrupts until the pin has stabilized and check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) * the state then. Use mod_timer() since we may be in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) * middle of the timer routine when this interrupt triggers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) disable_irq_nosync(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) mod_timer(&slot->detect_timer, jiffies + msecs_to_jiffies(20));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) static int atmci_init_slot(struct atmel_mci *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) struct mci_slot_pdata *slot_data, unsigned int id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) u32 sdc_reg, u32 sdio_irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) struct mmc_host *mmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) struct atmel_mci_slot *slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) mmc = mmc_alloc_host(sizeof(struct atmel_mci_slot), &host->pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) if (!mmc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) slot = mmc_priv(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) slot->mmc = mmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) slot->host = host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) slot->detect_pin = slot_data->detect_pin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) slot->wp_pin = slot_data->wp_pin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) slot->detect_is_active_high = slot_data->detect_is_active_high;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) slot->sdc_reg = sdc_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) slot->sdio_irq = sdio_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) dev_dbg(&mmc->class_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) "slot[%u]: bus_width=%u, detect_pin=%d, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) "detect_is_active_high=%s, wp_pin=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) id, slot_data->bus_width, slot_data->detect_pin,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) slot_data->detect_is_active_high ? "true" : "false",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) slot_data->wp_pin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) mmc->ops = &atmci_ops;
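/*
 * These limits assume the usual MCI divider of MCK / (2 * (CLKDIV + 1))
 * with an 8-bit CLKDIV, which gives bus_hz/512 .. bus_hz/2.
 */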
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) mmc->f_max = host->bus_hz / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) if (sdio_irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) mmc->caps |= MMC_CAP_SDIO_IRQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) if (host->caps.has_highspeed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) mmc->caps |= MMC_CAP_SD_HIGHSPEED;
/*
* Without the read/write proof capability, it is strongly suggested to
* use only one data bit to prevent FIFO underruns and overruns, which
* would corrupt the transferred data.
*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) if ((slot_data->bus_width >= 4) && host->caps.has_rwproof) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) mmc->caps |= MMC_CAP_4_BIT_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) if (slot_data->bus_width >= 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) mmc->caps |= MMC_CAP_8_BIT_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) if (atmci_get_version(host) < 0x200) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) mmc->max_segs = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) mmc->max_blk_size = 4095;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) mmc->max_blk_count = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) mmc->max_seg_size = mmc->max_blk_size * mmc->max_segs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) mmc->max_segs = 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) mmc->max_req_size = 32768 * 512;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) mmc->max_blk_size = 32768;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) mmc->max_blk_count = 512;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) /* Assume card is present initially */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) set_bit(ATMCI_CARD_PRESENT, &slot->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) if (gpio_is_valid(slot->detect_pin)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) if (devm_gpio_request(&host->pdev->dev, slot->detect_pin,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) "mmc_detect")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) dev_dbg(&mmc->class_dev, "no detect pin available\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) slot->detect_pin = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) } else if (gpio_get_value(slot->detect_pin) ^
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) slot->detect_is_active_high) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) clear_bit(ATMCI_CARD_PRESENT, &slot->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) if (!gpio_is_valid(slot->detect_pin)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) if (slot_data->non_removable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) mmc->caps |= MMC_CAP_NONREMOVABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) mmc->caps |= MMC_CAP_NEEDS_POLL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) if (gpio_is_valid(slot->wp_pin)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) if (devm_gpio_request(&host->pdev->dev, slot->wp_pin,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) "mmc_wp")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) dev_dbg(&mmc->class_dev, "no WP pin available\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) slot->wp_pin = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) host->slot[id] = slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) mmc_regulator_get_supply(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) mmc_add_host(mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) if (gpio_is_valid(slot->detect_pin)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) timer_setup(&slot->detect_timer, atmci_detect_change, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) ret = request_irq(gpio_to_irq(slot->detect_pin),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) atmci_detect_interrupt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) "mmc-detect", slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) dev_dbg(&mmc->class_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) "could not request IRQ %d for detect pin\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) gpio_to_irq(slot->detect_pin));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) slot->detect_pin = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) atmci_init_debugfs(slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) static void atmci_cleanup_slot(struct atmel_mci_slot *slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) unsigned int id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) /* Debugfs entries are cleaned up by the MMC core */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337)
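/*
 * Flag the slot as shutting down before unregistering it; the write
 * barrier publishes the flag to the card-detect interrupt and timer paths.
 */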
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) set_bit(ATMCI_SHUTDOWN, &slot->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) mmc_remove_host(slot->mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) if (gpio_is_valid(slot->detect_pin)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) int pin = slot->detect_pin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) free_irq(gpio_to_irq(pin), slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) del_timer_sync(&slot->detect_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) slot->host->slot[id] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) mmc_free_host(slot->mmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353)
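/*
 * Acquire the "rxtx" DMA channel. If the firmware does not describe one,
 * fall back to the legacy platform-data filter; any other error, including
 * probe deferral, is passed back to the caller.
 */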
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) static int atmci_configure_dma(struct atmel_mci *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) host->dma.chan = dma_request_chan(&host->pdev->dev, "rxtx");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) if (PTR_ERR(host->dma.chan) == -ENODEV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) struct mci_platform_data *pdata = host->pdev->dev.platform_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) dma_cap_mask_t mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) if (!pdata || !pdata->dma_filter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) dma_cap_zero(mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) dma_cap_set(DMA_SLAVE, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) host->dma.chan = dma_request_channel(mask, pdata->dma_filter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) pdata->dma_slave);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) if (!host->dma.chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) host->dma.chan = ERR_PTR(-ENODEV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) if (IS_ERR(host->dma.chan))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) return PTR_ERR(host->dma.chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) dev_info(&host->pdev->dev, "using %s for DMA transfers\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) dma_chan_name(host->dma.chan));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379)
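/* 32-bit single-beat accesses on the RDR/TDR data registers, both directions. */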
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) host->dma_conf.src_addr = host->mapbase + ATMCI_RDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) host->dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) host->dma_conf.src_maxburst = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) host->dma_conf.dst_addr = host->mapbase + ATMCI_TDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) host->dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) host->dma_conf.dst_maxburst = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) host->dma_conf.device_fc = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) * The HSMCI (High Speed MCI) module is not fully compatible with the MCI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) * module: HSMCI provides DMA support and a new config register, but no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) * longer supports PDC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) static void atmci_get_cap(struct atmel_mci *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) unsigned int version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) version = atmci_get_version(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) dev_info(&host->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) "version: 0x%x\n", version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) host->caps.has_dma_conf_reg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) host->caps.has_pdc = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) host->caps.has_cfg_reg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) host->caps.has_cstor_reg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) host->caps.has_highspeed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) host->caps.has_rwproof = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) host->caps.has_odd_clk_div = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) host->caps.has_bad_data_ordering = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) host->caps.need_reset_after_xfer = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) host->caps.need_blksz_mul_4 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) host->caps.need_notbusy_for_read_ops = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) /* keep only major version number */
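/*
 * The cases fall through on purpose: newer IP versions inherit every
 * capability enabled for the older versions below them.
 */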
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) switch (version & 0xf00) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) case 0x600:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) case 0x500:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) host->caps.has_odd_clk_div = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) case 0x400:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) case 0x300:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) host->caps.has_dma_conf_reg = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) host->caps.has_pdc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) host->caps.has_cfg_reg = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) host->caps.has_cstor_reg = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) host->caps.has_highspeed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) case 0x200:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) host->caps.has_rwproof = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) host->caps.need_blksz_mul_4 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) host->caps.need_notbusy_for_read_ops = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) case 0x100:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) host->caps.has_bad_data_ordering = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) host->caps.need_reset_after_xfer = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) case 0x0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) host->caps.has_pdc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) dev_warn(&host->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) "Unknown MCI version, assuming minimum capabilities\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448)
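/*
 * Probe: map the controller, enable its clock, reset it, install the
 * interrupt handler, pick a transfer method (DMA engine, PDC or PIO) from
 * the detected capabilities, then register up to two slots.
 */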
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) static int atmci_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) struct mci_platform_data *pdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) struct atmel_mci *host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) struct resource *regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) unsigned int nr_slots;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) int irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) int ret, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) if (!regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) pdata = pdev->dev.platform_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) if (!pdata) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) pdata = atmci_of_init(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) if (IS_ERR(pdata)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) dev_err(&pdev->dev, "platform data not available\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) return PTR_ERR(pdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) irq = platform_get_irq(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) if (irq < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) return irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) if (!host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) host->pdev = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) spin_lock_init(&host->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) INIT_LIST_HEAD(&host->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) host->mck = devm_clk_get(&pdev->dev, "mci_clk");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) if (IS_ERR(host->mck))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) return PTR_ERR(host->mck);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) host->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) if (!host->regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) ret = clk_prepare_enable(host->mck);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493)
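/* With MCK running, soft-reset the controller and record the bus clock rate. */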
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) host->bus_hz = clk_get_rate(host->mck);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) host->mapbase = regs->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) tasklet_init(&host->tasklet, atmci_tasklet_func, (unsigned long)host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) ret = request_irq(irq, atmci_interrupt, 0, dev_name(&pdev->dev), host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) clk_disable_unprepare(host->mck);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) /* Get MCI capabilities and set up the transfer operations accordingly */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) atmci_get_cap(host);
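/* Prefer a DMA engine channel; otherwise fall back to PDC, then to PIO. */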
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) ret = atmci_configure_dma(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) if (ret == -EPROBE_DEFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) goto err_dma_probe_defer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) if (ret == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) host->prepare_data = &atmci_prepare_data_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) host->submit_data = &atmci_submit_data_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) host->stop_transfer = &atmci_stop_transfer_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) } else if (host->caps.has_pdc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) dev_info(&pdev->dev, "using PDC\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) host->prepare_data = &atmci_prepare_data_pdc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) host->submit_data = &atmci_submit_data_pdc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) host->stop_transfer = &atmci_stop_transfer_pdc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) dev_info(&pdev->dev, "using PIO\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) host->prepare_data = &atmci_prepare_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) host->submit_data = &atmci_submit_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) host->stop_transfer = &atmci_stop_transfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) platform_set_drvdata(pdev, host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) timer_setup(&host->timer, atmci_timeout_timer, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) pm_runtime_get_noresume(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) pm_runtime_set_active(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_DELAY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) pm_runtime_use_autosuspend(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) pm_runtime_enable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) /* We need at least one slot to succeed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) nr_slots = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) if (pdata->slot[0].bus_width) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) ret = atmci_init_slot(host, &pdata->slot[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 0, ATMCI_SDCSEL_SLOT_A, ATMCI_SDIOIRQA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) nr_slots++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) host->buf_size = host->slot[0]->mmc->max_req_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) if (pdata->slot[1].bus_width) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) ret = atmci_init_slot(host, &pdata->slot[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 1, ATMCI_SDCSEL_SLOT_B, ATMCI_SDIOIRQB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) nr_slots++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) if (host->slot[1]->mmc->max_req_size > host->buf_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) host->buf_size =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) host->slot[1]->mmc->max_req_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) if (!nr_slots) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) dev_err(&pdev->dev, "init failed: no slot defined\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) goto err_init_slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564)
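/*
 * Controllers without the read/write proof feature bounce data through a
 * coherent buffer sized for the largest request either slot can issue.
 */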
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) if (!host->caps.has_rwproof) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) host->buffer = dma_alloc_coherent(&pdev->dev, host->buf_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) &host->buf_phys_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) if (!host->buffer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) dev_err(&pdev->dev, "buffer allocation failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) goto err_dma_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) dev_info(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) "Atmel MCI controller at 0x%08lx irq %d, %u slots\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) host->mapbase, irq, nr_slots);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) pm_runtime_mark_last_busy(&host->pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) pm_runtime_put_autosuspend(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) err_dma_alloc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) if (host->slot[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) atmci_cleanup_slot(host->slot[i], i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) err_init_slot:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) clk_disable_unprepare(host->mck);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) pm_runtime_disable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) pm_runtime_put_noidle(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) del_timer_sync(&host->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) if (!IS_ERR(host->dma.chan))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) dma_release_channel(host->dma.chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) err_dma_probe_defer:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) free_irq(irq, host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603)
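/*
 * Remove: resume the controller, free the bounce buffer, unregister the
 * slots, mask interrupts and disable the controller, then release the DMA
 * channel, interrupt, clock and runtime PM references.
 */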
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) static int atmci_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) struct atmel_mci *host = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) pm_runtime_get_sync(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) if (host->buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) dma_free_coherent(&pdev->dev, host->buf_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) host->buffer, host->buf_phys_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) if (host->slot[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) atmci_cleanup_slot(host->slot[i], i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) atmci_writel(host, ATMCI_IDR, ~0UL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) atmci_readl(host, ATMCI_SR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) del_timer_sync(&host->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) if (!IS_ERR(host->dma.chan))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) dma_release_channel(host->dma.chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) free_irq(platform_get_irq(pdev, 0), host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) clk_disable_unprepare(host->mck);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) pm_runtime_disable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) pm_runtime_put_noidle(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) #ifdef CONFIG_PM
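/*
 * Runtime PM: gate the peripheral clock and move the pins to their sleep
 * state while idle; resume restores the default pin state and the clock.
 */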
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) static int atmci_runtime_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) struct atmel_mci *host = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) clk_disable_unprepare(host->mck);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) pinctrl_pm_select_sleep_state(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) static int atmci_runtime_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) struct atmel_mci *host = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) pinctrl_select_default_state(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) return clk_prepare_enable(host->mck);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659)
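/* System sleep reuses the runtime PM callbacks via force suspend/resume. */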
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) static const struct dev_pm_ops atmci_dev_pm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) pm_runtime_force_resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) SET_RUNTIME_PM_OPS(atmci_runtime_suspend, atmci_runtime_resume, NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) static struct platform_driver atmci_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) .probe = atmci_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) .remove = atmci_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) .name = "atmel_mci",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) .probe_type = PROBE_PREFER_ASYNCHRONOUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) .of_match_table = of_match_ptr(atmci_dt_ids),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) .pm = &atmci_dev_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) module_platform_driver(atmci_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) MODULE_DESCRIPTION("Atmel Multimedia Card Interface driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) MODULE_LICENSE("GPL v2");