// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2009-2013, 2016-2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2014, Sony Mobile Communications AB.
 *
 */

#include <linux/acpi.h>
#include <linux/atomic.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>

/* QUP Registers */
#define QUP_CONFIG		0x000
#define QUP_STATE		0x004
#define QUP_IO_MODE		0x008
#define QUP_SW_RESET		0x00c
#define QUP_OPERATIONAL		0x018
#define QUP_ERROR_FLAGS		0x01c
#define QUP_ERROR_FLAGS_EN	0x020
#define QUP_OPERATIONAL_MASK	0x028
#define QUP_HW_VERSION		0x030
#define QUP_MX_OUTPUT_CNT	0x100
#define QUP_OUT_FIFO_BASE	0x110
#define QUP_MX_WRITE_CNT	0x150
#define QUP_MX_INPUT_CNT	0x200
#define QUP_MX_READ_CNT		0x208
#define QUP_IN_FIFO_BASE	0x218
#define QUP_I2C_CLK_CTL		0x400
#define QUP_I2C_STATUS		0x404
#define QUP_I2C_MASTER_GEN	0x408

/* QUP States and reset values */
#define QUP_RESET_STATE		0
#define QUP_RUN_STATE		1
#define QUP_PAUSE_STATE		3
#define QUP_STATE_MASK		3

#define QUP_STATE_VALID		BIT(2)
#define QUP_I2C_MAST_GEN	BIT(4)
#define QUP_I2C_FLUSH		BIT(6)

#define QUP_OPERATIONAL_RESET	0x000ff0
#define QUP_I2C_STATUS_RESET	0xfffffc

/* QUP OPERATIONAL FLAGS */
#define QUP_I2C_NACK_FLAG	BIT(3)
#define QUP_OUT_NOT_EMPTY	BIT(4)
#define QUP_IN_NOT_EMPTY	BIT(5)
#define QUP_OUT_FULL		BIT(6)
#define QUP_OUT_SVC_FLAG	BIT(8)
#define QUP_IN_SVC_FLAG		BIT(9)
#define QUP_MX_OUTPUT_DONE	BIT(10)
#define QUP_MX_INPUT_DONE	BIT(11)
#define OUT_BLOCK_WRITE_REQ	BIT(12)
#define IN_BLOCK_READ_REQ	BIT(13)

/* I2C mini core related values */
#define QUP_NO_INPUT		BIT(7)
#define QUP_CLOCK_AUTO_GATE	BIT(13)
#define I2C_MINI_CORE		(2 << 8)
#define I2C_N_VAL		15
#define I2C_N_VAL_V2		7

/* Most significant word offset in FIFO port */
#define QUP_MSW_SHIFT		(I2C_N_VAL + 1)
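
/*
 * On QUP v1 each 32-bit FIFO word carries two (tag | data) halves, the
 * second half shifted up by QUP_MSW_SHIFT. Illustrative packing, as done
 * in qup_i2c_write_tx_fifo_v1() below:
 *
 *   word = (QUP_TAG_DATA | byte0) |
 *          ((QUP_TAG_STOP | byte1) << QUP_MSW_SHIFT);
 */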

/* Packing/Unpacking words in FIFOs, and IO modes */
#define QUP_OUTPUT_BLK_MODE	(1 << 10)
#define QUP_OUTPUT_BAM_MODE	(3 << 10)
#define QUP_INPUT_BLK_MODE	(1 << 12)
#define QUP_INPUT_BAM_MODE	(3 << 12)
#define QUP_BAM_MODE		(QUP_OUTPUT_BAM_MODE | QUP_INPUT_BAM_MODE)
#define QUP_UNPACK_EN		BIT(14)
#define QUP_PACK_EN		BIT(15)

#define QUP_REPACK_EN		(QUP_UNPACK_EN | QUP_PACK_EN)
#define QUP_V2_TAGS_EN		1

#define QUP_OUTPUT_BLOCK_SIZE(x)	(((x) >> 0) & 0x03)
#define QUP_OUTPUT_FIFO_SIZE(x)		(((x) >> 2) & 0x07)
#define QUP_INPUT_BLOCK_SIZE(x)		(((x) >> 5) & 0x03)
#define QUP_INPUT_FIFO_SIZE(x)		(((x) >> 7) & 0x07)
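
/*
 * Illustrative decode of the sizing fields read back from QUP_IO_MODE. The
 * fields are encodings rather than byte counts; the probe path (not shown
 * here) translates them into the out/in block and FIFO sizes stored in
 * struct qup_i2c_dev:
 *
 *   u32 io_mode = readl(base + QUP_IO_MODE);
 *   out_blk_code  = QUP_OUTPUT_BLOCK_SIZE(io_mode);   (0..3)
 *   out_fifo_code = QUP_OUTPUT_FIFO_SIZE(io_mode);    (0..7)
 */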

/* QUP tags */
#define QUP_TAG_START		(1 << 8)
#define QUP_TAG_DATA		(2 << 8)
#define QUP_TAG_STOP		(3 << 8)
#define QUP_TAG_REC		(4 << 8)
#define QUP_BAM_INPUT_EOT	0x93
#define QUP_BAM_FLUSH_STOP	0x96

/* QUP v2 tags */
#define QUP_TAG_V2_START	0x81
#define QUP_TAG_V2_DATAWR	0x82
#define QUP_TAG_V2_DATAWR_STOP	0x83
#define QUP_TAG_V2_DATARD	0x85
#define QUP_TAG_V2_DATARD_NACK	0x86
#define QUP_TAG_V2_DATARD_STOP	0x87
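
/*
 * Example v2 tag stream (illustrative), as built by qup_i2c_set_tags()
 * below for a 2-byte write to 7-bit address 0x50 that fits in one block:
 *
 *   { QUP_TAG_V2_START, 0xa0, QUP_TAG_V2_DATAWR_STOP, 2 }
 *
 * where 0xa0 is the shifted 8-bit address.
 */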

/* Status, Error flags */
#define I2C_STATUS_WR_BUFFER_FULL	BIT(0)
#define I2C_STATUS_BUS_ACTIVE		BIT(8)
#define I2C_STATUS_ERROR_MASK		0x38000fc
#define QUP_STATUS_ERROR_FLAGS		0x7c

#define QUP_READ_LIMIT			256
#define SET_BIT				0x1
#define RESET_BIT			0x0
#define ONE_BYTE			0x1
#define QUP_I2C_MX_CONFIG_DURING_RUN	BIT(31)

/* Maximum transfer length for single DMA descriptor */
#define MX_TX_RX_LEN			SZ_64K
#define MX_BLOCKS			(MX_TX_RX_LEN / QUP_READ_LIMIT)
/* Maximum transfer length for all DMA descriptors */
#define MX_DMA_TX_RX_LEN		(2 * MX_TX_RX_LEN)
#define MX_DMA_BLOCKS			(MX_DMA_TX_RX_LEN / QUP_READ_LIMIT)

/*
 * Minimum transfer timeout for i2c transfers in seconds. It is added on top
 * of the maximum transfer time calculated from the i2c bus speed to
 * compensate for overhead.
 */
#define TOUT_MIN			2
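
/*
 * A sketch of how the transfer timeout is derived (assuming the usual
 * probe-time setup): one_byte_t below is the time needed to clock one byte
 * (9 bit times) at the programmed SCL frequency, and roughly
 *
 *   xfer_timeout ~= TOUT_MIN * HZ +
 *                   usecs_to_jiffies(MX_DMA_TX_RX_LEN * one_byte_t)
 *
 * i.e. TOUT_MIN seconds on top of the worst-case time for the largest DMA
 * transfer.
 */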

/* Default values. Use these if FW query fails */
#define DEFAULT_CLK_FREQ	I2C_MAX_STANDARD_MODE_FREQ
#define DEFAULT_SRC_CLK		20000000

/*
 * Maximum tag length (start, stop and up to a 2-byte address) for each QUP
 * data transfer
 */
#define QUP_MAX_TAGS_LEN	4
/* Max data length for each DATARD tag */
#define RECV_MAX_DATA_LEN	254
/* Tag length for DATA READ in RX FIFO */
#define READ_RX_TAGS_LEN	2

static unsigned int scl_freq;
module_param_named(scl_freq, scl_freq, uint, 0444);
MODULE_PARM_DESC(scl_freq, "SCL frequency override");

/*
 * count: number of blocks
 * pos: current block number
 * tx_tag_len: tx tag length for the current block
 * rx_tag_len: rx tag length for the current block
 * data_len: remaining data length for the current message
 * cur_blk_len: data length for the current block
 * total_tx_len: total tx length including tag bytes for the current QUP transfer
 * total_rx_len: total rx length including tag bytes for the current QUP transfer
 * tx_fifo_data_pos: current byte number in the TX FIFO word
 * tx_fifo_free: number of free bytes in the current QUP block write
 * rx_fifo_data_pos: current byte number in the RX FIFO word
 * fifo_available: number of available bytes in the RX FIFO for the current
 *		   QUP block read
 * tx_fifo_data: the QUP TX FIFO is written on a word basis (4 bytes). New
 *		 bytes are accumulated in this word and written to the TX
 *		 FIFO once all 4 bytes are available.
 * rx_fifo_data: the QUP RX FIFO is read on a word basis (4 bytes). This
 *		 holds the 4 bytes of RX data read from the FIFO.
 * cur_data: pointer to the current data position in the current message
 * cur_tx_tags: pointer to the current position in the tags
 * tx_tags_sent: all tx tag bytes have been written to the FIFO word
 * send_last_word: the last TX FIFO word of the current block is still pending
 * rx_bytes_read: all the bytes have been read from the RX FIFO
 * rx_tags_fetched: all the rx tag bytes have been fetched from the RX FIFO word
 * is_tx_blk_mode: whether tx uses block mode (rather than FIFO mode) for
 *		   non-BAM transfers
 * is_rx_blk_mode: whether rx uses block mode (rather than FIFO mode) for
 *		   non-BAM transfers
 * tags: tx tag bytes for the current QUP transfer
 */
struct qup_i2c_block {
	int		count;
	int		pos;
	int		tx_tag_len;
	int		rx_tag_len;
	int		data_len;
	int		cur_blk_len;
	int		total_tx_len;
	int		total_rx_len;
	int		tx_fifo_data_pos;
	int		tx_fifo_free;
	int		rx_fifo_data_pos;
	int		fifo_available;
	u32		tx_fifo_data;
	u32		rx_fifo_data;
	u8		*cur_data;
	u8		*cur_tx_tags;
	bool		tx_tags_sent;
	bool		send_last_word;
	bool		rx_tags_fetched;
	bool		rx_bytes_read;
	bool		is_tx_blk_mode;
	bool		is_rx_blk_mode;
	u8		tags[6];
};

struct qup_i2c_tag {
	u8 *start;
	dma_addr_t addr;
};

struct qup_i2c_bam {
	struct qup_i2c_tag tag;
	struct dma_chan *dma;
	struct scatterlist *sg;
	unsigned int sg_cnt;
};

struct qup_i2c_dev {
	struct device		*dev;
	void __iomem		*base;
	int			irq;
	struct clk		*clk;
	struct clk		*pclk;
	struct i2c_adapter	adap;

	int			clk_ctl;
	int			out_fifo_sz;
	int			in_fifo_sz;
	int			out_blk_sz;
	int			in_blk_sz;

	int			blk_xfer_limit;
	unsigned long		one_byte_t;
	unsigned long		xfer_timeout;
	struct qup_i2c_block	blk;

	struct i2c_msg		*msg;
	/* Current position in the user message buffer */
	int			pos;
	/* I2C protocol errors */
	u32			bus_err;
	/* QUP core errors */
	u32			qup_err;

	/* To check if this is the last msg */
	bool			is_last;
	bool			is_smbus_read;

	/* To configure when bus is in run state */
	u32			config_run;

	/* dma parameters */
	bool			is_dma;
	/* To check if the current transfer is using DMA */
	bool			use_dma;
	unsigned int		max_xfer_sg_len;
	unsigned int		tag_buf_pos;
	/* The threshold length above which block mode will be used */
	unsigned int		blk_mode_threshold;
	struct dma_pool		*dpool;
	struct qup_i2c_tag	start_tag;
	struct qup_i2c_bam	brx;
	struct qup_i2c_bam	btx;

	struct completion	xfer;
	/* function to write data in tx fifo */
	void (*write_tx_fifo)(struct qup_i2c_dev *qup);
	/* function to read data from rx fifo */
	void (*read_rx_fifo)(struct qup_i2c_dev *qup);
	/* function to write tags in tx fifo for i2c read transfer */
	void (*write_rx_tags)(struct qup_i2c_dev *qup);
};

static irqreturn_t qup_i2c_interrupt(int irq, void *dev)
{
	struct qup_i2c_dev *qup = dev;
	struct qup_i2c_block *blk = &qup->blk;
	u32 bus_err;
	u32 qup_err;
	u32 opflags;

	bus_err = readl(qup->base + QUP_I2C_STATUS);
	qup_err = readl(qup->base + QUP_ERROR_FLAGS);
	opflags = readl(qup->base + QUP_OPERATIONAL);

	if (!qup->msg) {
		/* Clear Error interrupt */
		writel(QUP_RESET_STATE, qup->base + QUP_STATE);
		return IRQ_HANDLED;
	}

	bus_err &= I2C_STATUS_ERROR_MASK;
	qup_err &= QUP_STATUS_ERROR_FLAGS;

	/* Clear the error bits in QUP_ERROR_FLAGS */
	if (qup_err)
		writel(qup_err, qup->base + QUP_ERROR_FLAGS);

	/* Clear the error bits in QUP_I2C_STATUS */
	if (bus_err)
		writel(bus_err, qup->base + QUP_I2C_STATUS);

	/*
	 * In BAM mode, return early if an error has already been recorded
	 * for the current transfer; in the error case the QUP sometimes
	 * generates more than one interrupt.
	 */
	if (qup->use_dma && (qup->qup_err || qup->bus_err))
		return IRQ_HANDLED;

	/* Reset the QUP State in case of error */
	if (qup_err || bus_err) {
		/*
		 * Don't reset the QUP state in case of BAM mode. The BAM
		 * flush operation needs to be scheduled in the transfer
		 * function, which will clear the remaining scheduled
		 * descriptors in the BAM HW FIFO and generate the BAM
		 * interrupt.
		 */
		if (!qup->use_dma)
			writel(QUP_RESET_STATE, qup->base + QUP_STATE);
		goto done;
	}

	if (opflags & QUP_OUT_SVC_FLAG) {
		writel(QUP_OUT_SVC_FLAG, qup->base + QUP_OPERATIONAL);

		if (opflags & OUT_BLOCK_WRITE_REQ) {
			blk->tx_fifo_free += qup->out_blk_sz;
			if (qup->msg->flags & I2C_M_RD)
				qup->write_rx_tags(qup);
			else
				qup->write_tx_fifo(qup);
		}
	}

	if (opflags & QUP_IN_SVC_FLAG) {
		writel(QUP_IN_SVC_FLAG, qup->base + QUP_OPERATIONAL);

		if (!blk->is_rx_blk_mode) {
			blk->fifo_available += qup->in_fifo_sz;
			qup->read_rx_fifo(qup);
		} else if (opflags & IN_BLOCK_READ_REQ) {
			blk->fifo_available += qup->in_blk_sz;
			qup->read_rx_fifo(qup);
		}
	}

	if (qup->msg->flags & I2C_M_RD) {
		if (!blk->rx_bytes_read)
			return IRQ_HANDLED;
	} else {
		/*
		 * Ideally, QUP_MAX_OUTPUT_DONE_FLAG should be checked for
		 * FIFO mode as well, but QUP_MAX_OUTPUT_DONE_FLAG sometimes
		 * lags behind QUP_OUTPUT_SERVICE_FLAG. In FIFO mode the only
		 * cause of an interrupt for a write message is the
		 * QUP_MAX_OUTPUT_DONE_FLAG condition anyway.
		 */
		if (blk->is_tx_blk_mode && !(opflags & QUP_MX_OUTPUT_DONE))
			return IRQ_HANDLED;
	}

done:
	qup->qup_err = qup_err;
	qup->bus_err = bus_err;
	complete(&qup->xfer);
	return IRQ_HANDLED;
}

static int qup_i2c_poll_state_mask(struct qup_i2c_dev *qup,
				   u32 req_state, u32 req_mask)
{
	int retries = 1;
	u32 state;

	/*
	 * State transition takes 3 AHB clock cycles + 3 I2C master clock
	 * cycles. So retry once after a 1 us delay.
	 */
	do {
		state = readl(qup->base + QUP_STATE);

		if (state & QUP_STATE_VALID &&
		    (state & req_mask) == req_state)
			return 0;

		udelay(1);
	} while (retries--);

	return -ETIMEDOUT;
}

static int qup_i2c_poll_state(struct qup_i2c_dev *qup, u32 req_state)
{
	return qup_i2c_poll_state_mask(qup, req_state, QUP_STATE_MASK);
}

static void qup_i2c_flush(struct qup_i2c_dev *qup)
{
	u32 val = readl(qup->base + QUP_STATE);

	val |= QUP_I2C_FLUSH;
	writel(val, qup->base + QUP_STATE);
}

static int qup_i2c_poll_state_valid(struct qup_i2c_dev *qup)
{
	return qup_i2c_poll_state_mask(qup, 0, 0);
}

static int qup_i2c_poll_state_i2c_master(struct qup_i2c_dev *qup)
{
	return qup_i2c_poll_state_mask(qup, QUP_I2C_MAST_GEN, QUP_I2C_MAST_GEN);
}

static int qup_i2c_change_state(struct qup_i2c_dev *qup, u32 state)
{
	if (qup_i2c_poll_state_valid(qup) != 0)
		return -EIO;

	writel(state, qup->base + QUP_STATE);

	if (qup_i2c_poll_state(qup, state) != 0)
		return -EIO;
	return 0;
}

/* Check if I2C bus returns to IDLE state */
static int qup_i2c_bus_active(struct qup_i2c_dev *qup, int len)
{
	unsigned long timeout;
	u32 status;
	int ret = 0;

	timeout = jiffies + len * 4;
	for (;;) {
		status = readl(qup->base + QUP_I2C_STATUS);
		if (!(status & I2C_STATUS_BUS_ACTIVE))
			break;

		if (time_after(jiffies, timeout))
			ret = -ETIMEDOUT;

		usleep_range(len, len * 2);
	}

	return ret;
}

static void qup_i2c_write_tx_fifo_v1(struct qup_i2c_dev *qup)
{
	struct qup_i2c_block *blk = &qup->blk;
	struct i2c_msg *msg = qup->msg;
	u32 addr = i2c_8bit_addr_from_msg(msg);
	u32 qup_tag;
	int idx;
	u32 val;

	if (qup->pos == 0) {
		val = QUP_TAG_START | addr;
		idx = 1;
		blk->tx_fifo_free--;
	} else {
		val = 0;
		idx = 0;
	}

	while (blk->tx_fifo_free && qup->pos < msg->len) {
		if (qup->pos == msg->len - 1)
			qup_tag = QUP_TAG_STOP;
		else
			qup_tag = QUP_TAG_DATA;

		if (idx & 1)
			val |= (qup_tag | msg->buf[qup->pos]) << QUP_MSW_SHIFT;
		else
			val = qup_tag | msg->buf[qup->pos];

		/* Write out the pair and the last odd value */
		if (idx & 1 || qup->pos == msg->len - 1)
			writel(val, qup->base + QUP_OUT_FIFO_BASE);

		qup->pos++;
		idx++;
		blk->tx_fifo_free--;
	}
}
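
/*
 * Worked example (illustrative) for qup_i2c_write_tx_fifo_v1(): a 3-byte
 * write of {0x01, 0x02, 0x03} to 7-bit address 0x50 (8-bit address 0xa0)
 * produces two FIFO words:
 *
 *   word0 = (QUP_TAG_START | 0xa0) | ((QUP_TAG_DATA | 0x01) << 16)
 *         = 0x020101a0
 *   word1 = (QUP_TAG_DATA | 0x02) | ((QUP_TAG_STOP | 0x03) << 16)
 *         = 0x03030202
 */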

static void qup_i2c_set_blk_data(struct qup_i2c_dev *qup,
				 struct i2c_msg *msg)
{
	qup->blk.pos = 0;
	qup->blk.data_len = msg->len;
	qup->blk.count = DIV_ROUND_UP(msg->len, qup->blk_xfer_limit);
}

static int qup_i2c_get_data_len(struct qup_i2c_dev *qup)
{
	int data_len;

	if (qup->blk.data_len > qup->blk_xfer_limit)
		data_len = qup->blk_xfer_limit;
	else
		data_len = qup->blk.data_len;

	return data_len;
}

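/*
 * A message with both I2C_M_RD and I2C_M_RECV_LEN set is an SMBus block
 * read: the first received byte is the count of the bytes that follow, so
 * the full read length is not known up front.
 */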
static bool qup_i2c_check_msg_len(struct i2c_msg *msg)
{
	return ((msg->flags & I2C_M_RD) && (msg->flags & I2C_M_RECV_LEN));
}

static int qup_i2c_set_tags_smb(u16 addr, u8 *tags, struct qup_i2c_dev *qup,
				struct i2c_msg *msg)
{
	int len = 0;

	if (qup->is_smbus_read) {
		tags[len++] = QUP_TAG_V2_DATARD_STOP;
		tags[len++] = qup_i2c_get_data_len(qup);
	} else {
		tags[len++] = QUP_TAG_V2_START;
		tags[len++] = addr & 0xff;

		if (msg->flags & I2C_M_TEN)
			tags[len++] = addr >> 8;

		tags[len++] = QUP_TAG_V2_DATARD;
		/* Read 1 byte indicating the length of the SMBus message */
		tags[len++] = 1;
	}
	return len;
}

static int qup_i2c_set_tags(u8 *tags, struct qup_i2c_dev *qup,
			    struct i2c_msg *msg)
{
	u16 addr = i2c_8bit_addr_from_msg(msg);
	int len = 0;
	int data_len;

	int last = (qup->blk.pos == (qup->blk.count - 1)) && (qup->is_last);

	/* Handle tags for SMBus block read */
	if (qup_i2c_check_msg_len(msg))
		return qup_i2c_set_tags_smb(addr, tags, qup, msg);

	if (qup->blk.pos == 0) {
		tags[len++] = QUP_TAG_V2_START;
		tags[len++] = addr & 0xff;

		if (msg->flags & I2C_M_TEN)
			tags[len++] = addr >> 8;
	}

	/* Send _STOP commands for the last block */
	if (last) {
		if (msg->flags & I2C_M_RD)
			tags[len++] = QUP_TAG_V2_DATARD_STOP;
		else
			tags[len++] = QUP_TAG_V2_DATAWR_STOP;
	} else {
		if (msg->flags & I2C_M_RD)
			tags[len++] = qup->blk.pos == (qup->blk.count - 1) ?
				      QUP_TAG_V2_DATARD_NACK :
				      QUP_TAG_V2_DATARD;
		else
			tags[len++] = QUP_TAG_V2_DATAWR;
	}

	data_len = qup_i2c_get_data_len(qup);

	/* 0 implies 256 bytes */
	if (data_len == QUP_READ_LIMIT)
		tags[len++] = 0;
	else
		tags[len++] = data_len;

	return len;
}
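
/*
 * Worked example (illustrative) for qup_i2c_set_tags(): a 300-byte write to
 * 7-bit address 0x50 with a 256-byte block transfer limit is split into two
 * blocks and yields
 *
 *   block 0: { QUP_TAG_V2_START, 0xa0, QUP_TAG_V2_DATAWR, 0 }   (0 = 256 bytes)
 *   block 1: { QUP_TAG_V2_DATAWR_STOP, 44 }
 *
 * assuming this is the last message of the transfer (qup->is_last).
 */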

static void qup_i2c_bam_cb(void *data)
{
	struct qup_i2c_dev *qup = data;

	complete(&qup->xfer);
}

static int qup_sg_set_buf(struct scatterlist *sg, void *buf,
			  unsigned int buflen, struct qup_i2c_dev *qup,
			  int dir)
{
	int ret;

	sg_set_buf(sg, buf, buflen);
	ret = dma_map_sg(qup->dev, sg, 1, dir);
	if (!ret)
		return -EINVAL;

	return 0;
}

static void qup_i2c_rel_dma(struct qup_i2c_dev *qup)
{
	if (qup->btx.dma)
		dma_release_channel(qup->btx.dma);
	if (qup->brx.dma)
		dma_release_channel(qup->brx.dma);
	qup->btx.dma = NULL;
	qup->brx.dma = NULL;
}

static int qup_i2c_req_dma(struct qup_i2c_dev *qup)
{
	int err;

	if (!qup->btx.dma) {
		qup->btx.dma = dma_request_chan(qup->dev, "tx");
		if (IS_ERR(qup->btx.dma)) {
			err = PTR_ERR(qup->btx.dma);
			qup->btx.dma = NULL;
			dev_err(qup->dev, "\n tx channel not available");
			return err;
		}
	}

	if (!qup->brx.dma) {
		qup->brx.dma = dma_request_chan(qup->dev, "rx");
		if (IS_ERR(qup->brx.dma)) {
			dev_err(qup->dev, "\n rx channel not available");
			err = PTR_ERR(qup->brx.dma);
			qup->brx.dma = NULL;
			qup_i2c_rel_dma(qup);
			return err;
		}
	}
	return 0;
}

static int qup_i2c_bam_make_desc(struct qup_i2c_dev *qup, struct i2c_msg *msg)
{
	int ret = 0, limit = QUP_READ_LIMIT;
	u32 len = 0, blocks, rem;
	u32 i = 0, tlen, tx_len = 0;
	u8 *tags;

	qup->blk_xfer_limit = QUP_READ_LIMIT;
	qup_i2c_set_blk_data(qup, msg);

	blocks = qup->blk.count;
	rem = msg->len - (blocks - 1) * limit;

	if (msg->flags & I2C_M_RD) {
		while (qup->blk.pos < blocks) {
			tlen = (i == (blocks - 1)) ? rem : limit;
			tags = &qup->start_tag.start[qup->tag_buf_pos + len];
			len += qup_i2c_set_tags(tags, qup, msg);
			qup->blk.data_len -= tlen;

			/* scratch buf to read the start and len tags */
			ret = qup_sg_set_buf(&qup->brx.sg[qup->brx.sg_cnt++],
					     &qup->brx.tag.start[0],
					     2, qup, DMA_FROM_DEVICE);

			if (ret)
				return ret;

			ret = qup_sg_set_buf(&qup->brx.sg[qup->brx.sg_cnt++],
					     &msg->buf[limit * i],
					     tlen, qup,
					     DMA_FROM_DEVICE);
			if (ret)
				return ret;

			i++;
			qup->blk.pos = i;
		}
		ret = qup_sg_set_buf(&qup->btx.sg[qup->btx.sg_cnt++],
				     &qup->start_tag.start[qup->tag_buf_pos],
				     len, qup, DMA_TO_DEVICE);
		if (ret)
			return ret;

		qup->tag_buf_pos += len;
	} else {
		while (qup->blk.pos < blocks) {
			tlen = (i == (blocks - 1)) ? rem : limit;
			tags = &qup->start_tag.start[qup->tag_buf_pos + tx_len];
			len = qup_i2c_set_tags(tags, qup, msg);
			qup->blk.data_len -= tlen;

			ret = qup_sg_set_buf(&qup->btx.sg[qup->btx.sg_cnt++],
					     tags, len,
					     qup, DMA_TO_DEVICE);
			if (ret)
				return ret;

			tx_len += len;
			ret = qup_sg_set_buf(&qup->btx.sg[qup->btx.sg_cnt++],
					     &msg->buf[limit * i],
					     tlen, qup, DMA_TO_DEVICE);
			if (ret)
				return ret;
			i++;
			qup->blk.pos = i;
		}

		qup->tag_buf_pos += tx_len;
	}

	return 0;
}
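
/*
 * Resulting scatter-gather layout (illustrative), per message passed to
 * qup_i2c_bam_make_desc():
 *
 *   read:  the TX sg gets one entry covering all the blocks' tags, while
 *          for every block the RX sg gets a 2-byte scratch entry (start/len
 *          tags) plus an entry pointing into msg->buf for the data.
 *   write: for every block the TX sg gets a tag entry followed by an entry
 *          pointing into msg->buf; nothing is queued on the RX sg.
 */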

static int qup_i2c_bam_schedule_desc(struct qup_i2c_dev *qup)
{
	struct dma_async_tx_descriptor *txd, *rxd = NULL;
	int ret = 0;
	dma_cookie_t cookie_rx, cookie_tx;
	u32 len = 0;
	u32 tx_cnt = qup->btx.sg_cnt, rx_cnt = qup->brx.sg_cnt;

	/* schedule the EOT and FLUSH I2C tags */
	len = 1;
	if (rx_cnt) {
		qup->btx.tag.start[0] = QUP_BAM_INPUT_EOT;
		len++;

		/* scratch buf to read the BAM EOT FLUSH tags */
		ret = qup_sg_set_buf(&qup->brx.sg[rx_cnt++],
				     &qup->brx.tag.start[0],
				     1, qup, DMA_FROM_DEVICE);
		if (ret)
			return ret;
	}

	qup->btx.tag.start[len - 1] = QUP_BAM_FLUSH_STOP;
	ret = qup_sg_set_buf(&qup->btx.sg[tx_cnt++], &qup->btx.tag.start[0],
			     len, qup, DMA_TO_DEVICE);
	if (ret)
		return ret;

	txd = dmaengine_prep_slave_sg(qup->btx.dma, qup->btx.sg, tx_cnt,
				      DMA_MEM_TO_DEV,
				      DMA_PREP_INTERRUPT | DMA_PREP_FENCE);
	if (!txd) {
		dev_err(qup->dev, "failed to get tx desc\n");
		ret = -EINVAL;
		goto desc_err;
	}

	if (!rx_cnt) {
		txd->callback = qup_i2c_bam_cb;
		txd->callback_param = qup;
	}

	cookie_tx = dmaengine_submit(txd);
	if (dma_submit_error(cookie_tx)) {
		ret = -EINVAL;
		goto desc_err;
	}

	dma_async_issue_pending(qup->btx.dma);

	if (rx_cnt) {
		rxd = dmaengine_prep_slave_sg(qup->brx.dma, qup->brx.sg,
					      rx_cnt, DMA_DEV_TO_MEM,
					      DMA_PREP_INTERRUPT);
		if (!rxd) {
			dev_err(qup->dev, "failed to get rx desc\n");
			ret = -EINVAL;

			/* abort TX descriptors */
			dmaengine_terminate_all(qup->btx.dma);
			goto desc_err;
		}

		rxd->callback = qup_i2c_bam_cb;
		rxd->callback_param = qup;
		cookie_rx = dmaengine_submit(rxd);
		if (dma_submit_error(cookie_rx)) {
			ret = -EINVAL;
			goto desc_err;
		}

		dma_async_issue_pending(qup->brx.dma);
	}

	if (!wait_for_completion_timeout(&qup->xfer, qup->xfer_timeout)) {
		dev_err(qup->dev, "normal trans timed out\n");
		ret = -ETIMEDOUT;
	}

	if (ret || qup->bus_err || qup->qup_err) {
		reinit_completion(&qup->xfer);

		ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
		if (ret) {
			dev_err(qup->dev, "change to run state timed out");
			goto desc_err;
		}

		qup_i2c_flush(qup);

		/* wait for remaining interrupts to occur */
		if (!wait_for_completion_timeout(&qup->xfer, HZ))
			dev_err(qup->dev, "flush timed out\n");

		ret = (qup->bus_err & QUP_I2C_NACK_FLAG) ? -ENXIO : -EIO;
	}

desc_err:
	dma_unmap_sg(qup->dev, qup->btx.sg, tx_cnt, DMA_TO_DEVICE);

	if (rx_cnt)
		dma_unmap_sg(qup->dev, qup->brx.sg, rx_cnt,
			     DMA_FROM_DEVICE);

	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
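/* Reset the scatterlist counts and tag buffer position for a new BAM batch. */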
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) static void qup_i2c_bam_clear_tag_buffers(struct qup_i2c_dev *qup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) qup->btx.sg_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) qup->brx.sg_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) qup->tag_buf_pos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
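/*
 * Top-level BAM (DMA) transfer path. The QUP is switched to BAM mode with the
 * FIFO interrupts masked, the bus clock divider is programmed, and the
 * messages are accumulated into tag/data descriptors. The descriptors are
 * flushed with qup_i2c_bam_schedule_desc() whenever the scatterlist limit is
 * crossed or the last message has been queued.
 */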
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) static int qup_i2c_bam_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) int num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) struct qup_i2c_dev *qup = i2c_get_adapdata(adap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) int idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) enable_irq(qup->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) ret = qup_i2c_req_dma(qup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) writel(0, qup->base + QUP_MX_INPUT_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) writel(0, qup->base + QUP_MX_OUTPUT_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) /* set BAM mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) writel(QUP_REPACK_EN | QUP_BAM_MODE, qup->base + QUP_IO_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) /* mask fifo irqs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) writel((0x3 << 8), qup->base + QUP_OPERATIONAL_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) /* set RUN STATE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) writel(qup->clk_ctl, qup->base + QUP_I2C_CLK_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) qup_i2c_bam_clear_tag_buffers(qup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) for (idx = 0; idx < num; idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) qup->msg = msg + idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) qup->is_last = idx == (num - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) ret = qup_i2c_bam_make_desc(qup, qup->msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
		/*
		 * Make the DMA descriptors and schedule the BAM transfer if
		 * the scatterlist count has already crossed the maximum
		 * length. Since the tag buffers are sized for 2 maximum-length
		 * transfers, the accumulated descriptors can never overrun the
		 * actual buffer length.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) if (qup->btx.sg_cnt > qup->max_xfer_sg_len ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) qup->brx.sg_cnt > qup->max_xfer_sg_len ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) qup->is_last) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) ret = qup_i2c_bam_schedule_desc(qup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) qup_i2c_bam_clear_tag_buffers(qup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) disable_irq(qup->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) qup->msg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
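/*
 * Wait for the current sub-transfer to complete. On timeout the QUP is
 * soft-reset; any bus or QUP error recorded during the transfer is translated
 * into -ENXIO (NACK) or -EIO.
 */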
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) static int qup_i2c_wait_for_complete(struct qup_i2c_dev *qup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) struct i2c_msg *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) unsigned long left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) left = wait_for_completion_timeout(&qup->xfer, qup->xfer_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) if (!left) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) writel(1, qup->base + QUP_SW_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) ret = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) if (qup->bus_err || qup->qup_err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) ret = (qup->bus_err & QUP_I2C_NACK_FLAG) ? -ENXIO : -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
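/*
 * Drain the RX FIFO for a QUP v1 read. Each 32-bit FIFO word carries two data
 * bytes (the low byte and the byte at QUP_MSW_SHIFT), so a new word is read
 * only on every other iteration.
 */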
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) static void qup_i2c_read_rx_fifo_v1(struct qup_i2c_dev *qup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) struct qup_i2c_block *blk = &qup->blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) struct i2c_msg *msg = qup->msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) u32 val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) int idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) while (blk->fifo_available && qup->pos < msg->len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) if ((idx & 1) == 0) {
			/* Read one FIFO word; it carries two data bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) val = readl(qup->base + QUP_IN_FIFO_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) msg->buf[qup->pos++] = val & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) msg->buf[qup->pos++] = val >> QUP_MSW_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) blk->fifo_available--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) if (qup->pos == msg->len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) blk->rx_bytes_read = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
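/*
 * Queue the v1 read tags: one output FIFO word holding the start tag with the
 * slave address and the receive tag with the requested length.
 */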
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) static void qup_i2c_write_rx_tags_v1(struct qup_i2c_dev *qup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) struct i2c_msg *msg = qup->msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) u32 addr, len, val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) addr = i2c_8bit_addr_from_msg(msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
	/* 0 is used to specify a length of 256 (QUP_READ_LIMIT) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) len = (msg->len == QUP_READ_LIMIT) ? 0 : msg->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) val = ((QUP_TAG_REC | len) << QUP_MSW_SHIFT) | QUP_TAG_START | addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) writel(val, qup->base + QUP_OUT_FIFO_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
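/*
 * Program the QUP v1 transfer counts and I/O mode. Block mode is selected
 * whenever the total length does not fit in the corresponding FIFO, in which
 * case the MX_OUTPUT/MX_INPUT counters are used instead of the write/read
 * counters.
 */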
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) static void qup_i2c_conf_v1(struct qup_i2c_dev *qup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) struct qup_i2c_block *blk = &qup->blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) u32 qup_config = I2C_MINI_CORE | I2C_N_VAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) u32 io_mode = QUP_REPACK_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) blk->is_tx_blk_mode = blk->total_tx_len > qup->out_fifo_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) blk->is_rx_blk_mode = blk->total_rx_len > qup->in_fifo_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) if (blk->is_tx_blk_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) io_mode |= QUP_OUTPUT_BLK_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) writel(0, qup->base + QUP_MX_WRITE_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) writel(blk->total_tx_len, qup->base + QUP_MX_OUTPUT_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) writel(0, qup->base + QUP_MX_OUTPUT_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) writel(blk->total_tx_len, qup->base + QUP_MX_WRITE_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) if (blk->total_rx_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) if (blk->is_rx_blk_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) io_mode |= QUP_INPUT_BLK_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) writel(0, qup->base + QUP_MX_READ_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) writel(blk->total_rx_len, qup->base + QUP_MX_INPUT_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) writel(0, qup->base + QUP_MX_INPUT_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) writel(blk->total_rx_len, qup->base + QUP_MX_READ_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) qup_config |= QUP_NO_INPUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) writel(qup_config, qup->base + QUP_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) writel(io_mode, qup->base + QUP_IO_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
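/* Clear the per-block bookkeeping before starting a QUP v1 sub-transfer. */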
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) static void qup_i2c_clear_blk_v1(struct qup_i2c_block *blk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) blk->tx_fifo_free = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) blk->fifo_available = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) blk->rx_bytes_read = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
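/*
 * Run one QUP v1 sub-transfer: program the counts and the clock divider,
 * pause the QUP, pre-fill the TX FIFO (read tags or write data) when in FIFO
 * mode, then move to the RUN state and wait for completion.
 */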
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) static int qup_i2c_conf_xfer_v1(struct qup_i2c_dev *qup, bool is_rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) struct qup_i2c_block *blk = &qup->blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) qup_i2c_clear_blk_v1(blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) qup_i2c_conf_v1(qup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) writel(qup->clk_ctl, qup->base + QUP_I2C_CLK_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) ret = qup_i2c_change_state(qup, QUP_PAUSE_STATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) reinit_completion(&qup->xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) enable_irq(qup->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) if (!blk->is_tx_blk_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) blk->tx_fifo_free = qup->out_fifo_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) if (is_rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) qup_i2c_write_rx_tags_v1(qup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) qup_i2c_write_tx_fifo_v1(qup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) ret = qup_i2c_wait_for_complete(qup, qup->msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) ret = qup_i2c_bus_active(qup, ONE_BYTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) disable_irq(qup->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
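/* Write one i2c message: total_tx_len adds one byte for the slave address. */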
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) static int qup_i2c_write_one(struct qup_i2c_dev *qup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) struct i2c_msg *msg = qup->msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) struct qup_i2c_block *blk = &qup->blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) qup->pos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) blk->total_tx_len = msg->len + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) blk->total_rx_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) return qup_i2c_conf_xfer_v1(qup, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
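/*
 * Read one i2c message: the two output FIFO slots hold the start/address tag
 * and the receive/length tag written by qup_i2c_write_rx_tags_v1().
 */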
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) static int qup_i2c_read_one(struct qup_i2c_dev *qup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) struct qup_i2c_block *blk = &qup->blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) qup->pos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) blk->total_tx_len = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) blk->total_rx_len = qup->msg->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) return qup_i2c_conf_xfer_v1(qup, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
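/*
 * QUP v1 master_xfer: reset the QUP, configure it as an I2C mini core and
 * then transfer the messages one at a time, returning the number of messages
 * on success.
 */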
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) static int qup_i2c_xfer(struct i2c_adapter *adap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) struct i2c_msg msgs[],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) int num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) struct qup_i2c_dev *qup = i2c_get_adapdata(adap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) int ret, idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) ret = pm_runtime_get_sync(qup->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) qup->bus_err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) qup->qup_err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) writel(1, qup->base + QUP_SW_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) ret = qup_i2c_poll_state(qup, QUP_RESET_STATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) /* Configure QUP as I2C mini core */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) writel(I2C_MINI_CORE | I2C_N_VAL, qup->base + QUP_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) for (idx = 0; idx < num; idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) if (qup_i2c_poll_state_i2c_master(qup)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) if (qup_i2c_check_msg_len(&msgs[idx])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) qup->msg = &msgs[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) if (msgs[idx].flags & I2C_M_RD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) ret = qup_i2c_read_one(qup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) ret = qup_i2c_write_one(qup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) ret = qup_i2c_change_state(qup, QUP_RESET_STATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) if (ret == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) ret = num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) pm_runtime_mark_last_busy(qup->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) pm_runtime_put_autosuspend(qup->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
/*
 * Configure the registers related to reconfiguration during run. This is
 * called before each i2c sub-transfer.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) static void qup_i2c_conf_count_v2(struct qup_i2c_dev *qup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) struct qup_i2c_block *blk = &qup->blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) u32 qup_config = I2C_MINI_CORE | I2C_N_VAL_V2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) if (blk->is_tx_blk_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) writel(qup->config_run | blk->total_tx_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) qup->base + QUP_MX_OUTPUT_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) writel(qup->config_run | blk->total_tx_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) qup->base + QUP_MX_WRITE_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) if (blk->total_rx_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) if (blk->is_rx_blk_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) writel(qup->config_run | blk->total_rx_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) qup->base + QUP_MX_INPUT_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) writel(qup->config_run | blk->total_rx_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) qup->base + QUP_MX_READ_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) qup_config |= QUP_NO_INPUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) writel(qup_config, qup->base + QUP_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
/*
 * Configure the registers related to the transfer mode (FIFO/Block)
 * before starting an i2c transfer. This is called only once, while the
 * QUP is in the RESET state.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) static void qup_i2c_conf_mode_v2(struct qup_i2c_dev *qup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) struct qup_i2c_block *blk = &qup->blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) u32 io_mode = QUP_REPACK_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) if (blk->is_tx_blk_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) io_mode |= QUP_OUTPUT_BLK_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) writel(0, qup->base + QUP_MX_WRITE_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) writel(0, qup->base + QUP_MX_OUTPUT_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) if (blk->is_rx_blk_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) io_mode |= QUP_INPUT_BLK_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) writel(0, qup->base + QUP_MX_READ_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) writel(0, qup->base + QUP_MX_INPUT_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) writel(io_mode, qup->base + QUP_IO_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
/* Clear the required variables before starting any QUP v2 sub-transfer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) static void qup_i2c_clear_blk_v2(struct qup_i2c_block *blk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) blk->send_last_word = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) blk->tx_tags_sent = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) blk->tx_fifo_data = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) blk->tx_fifo_data_pos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) blk->tx_fifo_free = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) blk->rx_tags_fetched = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) blk->rx_bytes_read = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) blk->rx_fifo_data = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) blk->rx_fifo_data_pos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) blk->fifo_available = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
/* Receive data from RX FIFO for a read message in a QUP v2 i2c transfer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) static void qup_i2c_recv_data(struct qup_i2c_dev *qup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) struct qup_i2c_block *blk = &qup->blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) for (j = blk->rx_fifo_data_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) blk->cur_blk_len && blk->fifo_available;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) blk->cur_blk_len--, blk->fifo_available--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) if (j == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) blk->rx_fifo_data = readl(qup->base + QUP_IN_FIFO_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) *(blk->cur_data++) = blk->rx_fifo_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) blk->rx_fifo_data >>= 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) if (j == 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) j = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) j++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) blk->rx_fifo_data_pos = j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)
/* Receive the tags for a read message in a QUP v2 i2c transfer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) static void qup_i2c_recv_tags(struct qup_i2c_dev *qup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) struct qup_i2c_block *blk = &qup->blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) blk->rx_fifo_data = readl(qup->base + QUP_IN_FIFO_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) blk->rx_fifo_data >>= blk->rx_tag_len * 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) blk->rx_fifo_data_pos = blk->rx_tag_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) blk->fifo_available -= blk->rx_tag_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)
/*
 * Read the data and tags from the RX FIFO. In the read case the received
 * data bytes are preceded by the tags, so:
 * 1. If rx_tags_fetched is false, i.e. this is the start of a QUP block,
 *    receive all tag bytes and discard them.
 * 2. Read the data from the RX FIFO. Once all the data bytes have been read,
 *    set rx_bytes_read to true.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) static void qup_i2c_read_rx_fifo_v2(struct qup_i2c_dev *qup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) struct qup_i2c_block *blk = &qup->blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) if (!blk->rx_tags_fetched) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) qup_i2c_recv_tags(qup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) blk->rx_tags_fetched = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) qup_i2c_recv_data(qup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) if (!blk->cur_blk_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) blk->rx_bytes_read = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
/*
 * Write bytes to the TX FIFO for a write message in a QUP v2 i2c transfer.
 * TX FIFO writes work on a word basis (4 bytes). Each new data byte is
 * accumulated in tx_fifo_data, and the word is written to the TX FIFO once
 * all 4 bytes are present.
 */
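/*
 * For example (illustrative only): bytes 0xAA 0xBB 0xCC 0xDD arriving in that
 * order accumulate as 0xAA, 0xBBAA, 0xCCBBAA and finally 0xDDCCBBAA, at which
 * point the full word is written to QUP_OUT_FIFO_BASE.
 */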
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) qup_i2c_write_blk_data(struct qup_i2c_dev *qup, u8 **data, unsigned int *len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) struct qup_i2c_block *blk = &qup->blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) unsigned int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) for (j = blk->tx_fifo_data_pos; *len && blk->tx_fifo_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) (*len)--, blk->tx_fifo_free--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) blk->tx_fifo_data |= *(*data)++ << (j * 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) if (j == 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) writel(blk->tx_fifo_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) qup->base + QUP_OUT_FIFO_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) blk->tx_fifo_data = 0x0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) j = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) j++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) blk->tx_fifo_data_pos = j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275)
/* Transfer the tags for a read message in a QUP v2 i2c transfer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) static void qup_i2c_write_rx_tags_v2(struct qup_i2c_dev *qup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) struct qup_i2c_block *blk = &qup->blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) qup_i2c_write_blk_data(qup, &blk->cur_tx_tags, &blk->tx_tag_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) if (blk->tx_fifo_data_pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) writel(blk->tx_fifo_data, qup->base + QUP_OUT_FIFO_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)
/*
 * Write the data and tags to the TX FIFO. In the write case both tags and
 * data need to be written, and a QUP write tag can cover at most 256 data
 * bytes, so:
 *
 * 1. If tx_tags_sent is false, i.e. this is the start of a QUP block, write
 *    the tags to the TX FIFO and set tx_tags_sent to true.
 * 2. If send_last_word is true, the last few data bytes (less than 4) are
 *    still pending in tx_fifo_data because there was no FIFO space left on
 *    the previous pass, so write that word to the FIFO now.
 * 3. Write the data to the TX FIFO and check cur_blk_len. If it is non-zero,
 *    more data is pending; otherwise one of the following 3 cases applies:
 *    a. tx_fifo_data_pos is zero, i.e. all the data bytes in this block have
 *       been written to the TX FIFO, so nothing else is required.
 *    b. tx_fifo_free is non-zero, i.e. the TX FIFO has room, so copy the
 *       remaining data from tx_fifo_data to the TX FIFO. Since
 *       qup_i2c_write_blk_data() writes in units of 4 bytes and the FIFO
 *       space is a multiple of 4 bytes, tx_fifo_free will always be greater
 *       than or equal to 4 bytes here.
 *    c. tx_fifo_free is zero. In this case the last few bytes (less than 4)
 *       were copied to tx_fifo_data but could not be sent because the FIFO
 *       was full, so set send_last_word to true.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) static void qup_i2c_write_tx_fifo_v2(struct qup_i2c_dev *qup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) struct qup_i2c_block *blk = &qup->blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) if (!blk->tx_tags_sent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) qup_i2c_write_blk_data(qup, &blk->cur_tx_tags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) &blk->tx_tag_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) blk->tx_tags_sent = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) if (blk->send_last_word)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) goto send_last_word;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) qup_i2c_write_blk_data(qup, &blk->cur_data, &blk->cur_blk_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) if (!blk->cur_blk_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) if (!blk->tx_fifo_data_pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) if (blk->tx_fifo_free)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) goto send_last_word;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) blk->send_last_word = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) send_last_word:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) writel(blk->tx_fifo_data, qup->base + QUP_OUT_FIFO_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)
/*
 * Main transfer function, which reads or writes i2c data.
 * QUP v2 supports reconfiguration during run, in which multiple i2c
 * sub-transfers can be scheduled.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) qup_i2c_conf_xfer_v2(struct qup_i2c_dev *qup, bool is_rx, bool is_first,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) bool change_pause_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) struct qup_i2c_block *blk = &qup->blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) struct i2c_msg *msg = qup->msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350)
	/*
	 * Check if this is an SMBus block read, for which the top-level read
	 * is split into 2 QUP reads: one with a message length of 1 and the
	 * other with the actual length.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) if (qup_i2c_check_msg_len(msg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) if (qup->is_smbus_read) {
			/*
			 * If the message length has already been read into
			 * the first byte of the buffer, account for that by
			 * setting the offset.
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) blk->cur_data += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) is_first = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) change_pause_state = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) qup->config_run = is_first ? 0 : QUP_I2C_MX_CONFIG_DURING_RUN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) qup_i2c_clear_blk_v2(blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) qup_i2c_conf_count_v2(qup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)
	/* If it is the first sub-transfer, configure the i2c bus clocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) if (is_first) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) writel(qup->clk_ctl, qup->base + QUP_I2C_CLK_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) ret = qup_i2c_change_state(qup, QUP_PAUSE_STATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) reinit_completion(&qup->xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) enable_irq(qup->irq);
	/*
	 * In FIFO mode the TX FIFO can be written directly, while in block
	 * mode it is written after the OUT_BLOCK_WRITE_REQ interrupt is
	 * received.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) if (!blk->is_tx_blk_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) blk->tx_fifo_free = qup->out_fifo_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) if (is_rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) qup_i2c_write_rx_tags_v2(qup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) qup_i2c_write_tx_fifo_v2(qup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) ret = qup_i2c_wait_for_complete(qup, msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410)
	/* Move to the pause state for all transfers except the last one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) if (change_pause_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) ret = qup_i2c_change_state(qup, QUP_PAUSE_STATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) disable_irq(qup->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)
/*
 * Transfer one read/write message of an i2c transfer. The message is split
 * into blocks of at most blk_xfer_limit data bytes, and each QUP block is
 * scheduled individually.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) static int qup_i2c_xfer_v2_msg(struct qup_i2c_dev *qup, int msg_id, bool is_rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) unsigned int data_len, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) struct i2c_msg *msg = qup->msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) struct qup_i2c_block *blk = &qup->blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) u8 *msg_buf = msg->buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) qup->blk_xfer_limit = is_rx ? RECV_MAX_DATA_LEN : QUP_READ_LIMIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) qup_i2c_set_blk_data(qup, msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) for (i = 0; i < blk->count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) data_len = qup_i2c_get_data_len(qup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) blk->pos = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) blk->cur_tx_tags = blk->tags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) blk->cur_blk_len = data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) blk->tx_tag_len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) qup_i2c_set_tags(blk->cur_tx_tags, qup, qup->msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) blk->cur_data = msg_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) if (is_rx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) blk->total_tx_len = blk->tx_tag_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) blk->rx_tag_len = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) blk->total_rx_len = blk->rx_tag_len + data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) blk->total_tx_len = blk->tx_tag_len + data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) blk->total_rx_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) ret = qup_i2c_conf_xfer_v2(qup, is_rx, !msg_id && !i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) !qup->is_last || i < blk->count - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) /* Handle SMBus block read length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) if (qup_i2c_check_msg_len(msg) && msg->len == 1 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) !qup->is_smbus_read) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) if (msg->buf[0] > I2C_SMBUS_BLOCK_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) return -EPROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) msg->len = msg->buf[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) qup->is_smbus_read = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) ret = qup_i2c_xfer_v2_msg(qup, msg_id, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) qup->is_smbus_read = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) msg->len += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) msg_buf += data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) blk->data_len -= qup->blk_xfer_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485)
/*
 * QUP v2 supports 3 modes:
 * Programmed IO using FIFO mode  : transfers smaller than the FIFO size
 * Programmed IO using Block mode : transfers larger than the FIFO size
 * DMA using BAM                  : suitable for any transfer size, but the
 *                                  buffer must be DMA-able
 *
 * This function determines the mode that will be used for this transfer. An
 * i2c transfer may contain multiple messages. The rules to determine the
 * mode are:
 * 1. Determine the complete length and the maximum tx and rx lengths for the
 *    whole transfer.
 * 2. If the complete transfer length is greater than the FIFO size, use DMA
 *    mode.
 * 3. In FIFO or block mode, tx and rx can operate in different modes, so
 *    check the maximum tx and rx lengths to determine the mode for each.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) qup_i2c_determine_mode_v2(struct qup_i2c_dev *qup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) struct i2c_msg msgs[], int num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) bool no_dma = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) unsigned int max_tx_len = 0, max_rx_len = 0, total_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) /* All i2c_msgs should be transferred using either dma or cpu */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) for (idx = 0; idx < num; idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) if (msgs[idx].flags & I2C_M_RD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) max_rx_len = max_t(unsigned int, max_rx_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) msgs[idx].len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) max_tx_len = max_t(unsigned int, max_tx_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) msgs[idx].len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) if (is_vmalloc_addr(msgs[idx].buf))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) no_dma = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) total_len += msgs[idx].len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) if (!no_dma && qup->is_dma &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) (total_len > qup->out_fifo_sz || total_len > qup->in_fifo_sz)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) qup->use_dma = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) qup->blk.is_tx_blk_mode = max_tx_len > qup->out_fifo_sz -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) QUP_MAX_TAGS_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) qup->blk.is_rx_blk_mode = max_rx_len > qup->in_fifo_sz -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) READ_RX_TAGS_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537)
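/*
 * QUP v2 master_xfer: pick DMA or programmed I/O with
 * qup_i2c_determine_mode_v2(), reset and configure the QUP, then run the
 * transfer either through the BAM path or message by message through
 * qup_i2c_xfer_v2_msg(). Returns the number of messages on success.
 */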
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) static int qup_i2c_xfer_v2(struct i2c_adapter *adap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) struct i2c_msg msgs[],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) int num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) struct qup_i2c_dev *qup = i2c_get_adapdata(adap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) int ret, idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) qup->bus_err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) qup->qup_err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) ret = pm_runtime_get_sync(qup->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) ret = qup_i2c_determine_mode_v2(qup, msgs, num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) writel(1, qup->base + QUP_SW_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) ret = qup_i2c_poll_state(qup, QUP_RESET_STATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) /* Configure QUP as I2C mini core */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) writel(I2C_MINI_CORE | I2C_N_VAL_V2, qup->base + QUP_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) writel(QUP_V2_TAGS_EN, qup->base + QUP_I2C_MASTER_GEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) if (qup_i2c_poll_state_i2c_master(qup)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) if (qup->use_dma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) reinit_completion(&qup->xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) ret = qup_i2c_bam_xfer(adap, &msgs[0], num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) qup->use_dma = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) qup_i2c_conf_mode_v2(qup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) for (idx = 0; idx < num; idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) qup->msg = &msgs[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) qup->is_last = idx == (num - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) ret = qup_i2c_xfer_v2_msg(qup, idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) !!(msgs[idx].flags & I2C_M_RD));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) qup->msg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) ret = qup_i2c_bus_active(qup, ONE_BYTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) qup_i2c_change_state(qup, QUP_RESET_STATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) if (ret == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) ret = num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) pm_runtime_mark_last_busy(qup->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) pm_runtime_put_autosuspend(qup->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)
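/* Zero-length transfers are not supported, so SMBUS_QUICK is masked out. */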
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) static u32 qup_i2c_func(struct i2c_adapter *adap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) static const struct i2c_algorithm qup_i2c_algo = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) .master_xfer = qup_i2c_xfer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) .functionality = qup_i2c_func,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) static const struct i2c_algorithm qup_i2c_algo_v2 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) .master_xfer = qup_i2c_xfer_v2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) .functionality = qup_i2c_func,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618)
/*
 * The QUP block will issue a NACK and STOP on the bus when reaching
 * the end of the read. The length of the read is specified as one byte,
 * which limits the possible read to 256 (QUP_READ_LIMIT) bytes.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) static const struct i2c_adapter_quirks qup_i2c_quirks = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) .flags = I2C_AQ_NO_ZERO_LEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) .max_read_len = QUP_READ_LIMIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) static const struct i2c_adapter_quirks qup_i2c_quirks_v2 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) .flags = I2C_AQ_NO_ZERO_LEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632)
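/* Prepare and enable both QUP clocks. */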
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) static void qup_i2c_enable_clocks(struct qup_i2c_dev *qup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) clk_prepare_enable(qup->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) clk_prepare_enable(qup->pclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) static void qup_i2c_disable_clocks(struct qup_i2c_dev *qup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) u32 config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) qup_i2c_change_state(qup, QUP_RESET_STATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) clk_disable_unprepare(qup->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) config = readl(qup->base + QUP_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) config |= QUP_CLOCK_AUTO_GATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) writel(config, qup->base + QUP_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) clk_disable_unprepare(qup->pclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) }
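/*
 * Note (inferred from the ordering in qup_i2c_disable_clocks() above):
 * QUP_CLOCK_AUTO_GATE is written after the core clock is disabled but
 * before the iface clock, which presumably still feeds the register
 * interface at that point, so the QUP_CONFIG write can still complete.
 */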
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) static const struct acpi_device_id qup_i2c_acpi_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) { "QCOM8010" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) { },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) MODULE_DEVICE_TABLE(acpi, qup_i2c_acpi_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) static int qup_i2c_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) static const int blk_sizes[] = {4, 16, 32};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) struct qup_i2c_dev *qup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) unsigned long one_bit_t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) u32 io_mode, hw_ver, size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) int ret, fs_div, hs_div;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) u32 src_clk_freq = DEFAULT_SRC_CLK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) u32 clk_freq = DEFAULT_CLK_FREQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) int blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) bool is_qup_v1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) qup = devm_kzalloc(&pdev->dev, sizeof(*qup), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) if (!qup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) qup->dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) init_completion(&qup->xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) platform_set_drvdata(pdev, qup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) if (scl_freq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) dev_notice(qup->dev, "Using override frequency of %u\n", scl_freq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) clk_freq = scl_freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) ret = device_property_read_u32(qup->dev, "clock-frequency", &clk_freq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) dev_notice(qup->dev, "using default clock-frequency %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) DEFAULT_CLK_FREQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) if (of_device_is_compatible(pdev->dev.of_node, "qcom,i2c-qup-v1.1.1")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) qup->adap.algo = &qup_i2c_algo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) qup->adap.quirks = &qup_i2c_quirks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) is_qup_v1 = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) qup->adap.algo = &qup_i2c_algo_v2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) qup->adap.quirks = &qup_i2c_quirks_v2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) is_qup_v1 = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) if (acpi_match_device(qup_i2c_acpi_match, qup->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) goto nodma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) ret = qup_i2c_req_dma(qup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) if (ret == -EPROBE_DEFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) goto fail_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) goto nodma;
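/*
 * Summary of the branches above: ACPI-enumerated controllers skip DMA
 * setup entirely, -EPROBE_DEFER is propagated so the probe can be retried
 * once the DMA controller is available, and any other DMA request failure
 * falls back to PIO-only operation.
 */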
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) qup->max_xfer_sg_len = (MX_BLOCKS << 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) blocks = (MX_DMA_BLOCKS << 1) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) qup->btx.sg = devm_kcalloc(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) blocks, sizeof(*qup->btx.sg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) if (!qup->btx.sg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) goto fail_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) sg_init_table(qup->btx.sg, blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) qup->brx.sg = devm_kcalloc(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) blocks, sizeof(*qup->brx.sg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) if (!qup->brx.sg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) goto fail_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) sg_init_table(qup->brx.sg, blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) /* 2 tag bytes for each block + 5 for start, stop tags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) size = blocks * 2 + 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) qup->start_tag.start = devm_kzalloc(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) if (!qup->start_tag.start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) goto fail_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) qup->brx.tag.start = devm_kzalloc(&pdev->dev, 2, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) if (!qup->brx.tag.start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) goto fail_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) qup->btx.tag.start = devm_kzalloc(&pdev->dev, 2, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) if (!qup->btx.tag.start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) goto fail_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) qup->is_dma = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) nodma:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) /* We support frequencies up to FAST Mode Plus (1MHz) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) if (!clk_freq || clk_freq > I2C_MAX_FAST_MODE_PLUS_FREQ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) dev_err(qup->dev, "unsupported clock frequency %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) clk_freq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) qup->base = devm_platform_ioremap_resource(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) if (IS_ERR(qup->base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) return PTR_ERR(qup->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) qup->irq = platform_get_irq(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) if (qup->irq < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) return qup->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) if (has_acpi_companion(qup->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) ret = device_property_read_u32(qup->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) "src-clock-hz", &src_clk_freq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) dev_notice(qup->dev, "using default src-clock-hz %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) DEFAULT_SRC_CLK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) ACPI_COMPANION_SET(&qup->adap.dev, ACPI_COMPANION(qup->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) qup->clk = devm_clk_get(qup->dev, "core");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) if (IS_ERR(qup->clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) dev_err(qup->dev, "Could not get core clock\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) return PTR_ERR(qup->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) qup->pclk = devm_clk_get(qup->dev, "iface");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) if (IS_ERR(qup->pclk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) dev_err(qup->dev, "Could not get iface clock\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) return PTR_ERR(qup->pclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) qup_i2c_enable_clocks(qup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) src_clk_freq = clk_get_rate(qup->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) * Bootloaders might leave a pending interrupt on certain QUPs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) * so we reset the core before registering for interrupts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) writel(1, qup->base + QUP_SW_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) ret = qup_i2c_poll_state_valid(qup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) ret = devm_request_irq(qup->dev, qup->irq, qup_i2c_interrupt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) IRQF_TRIGGER_HIGH, "i2c_qup", qup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) dev_err(qup->dev, "Requesting IRQ %d failed\n", qup->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) disable_irq(qup->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) hw_ver = readl(qup->base + QUP_HW_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) dev_dbg(qup->dev, "Revision %x\n", hw_ver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) io_mode = readl(qup->base + QUP_IO_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) * The block/FIFO size available for actual data is half the raw size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) * because of the tag associated with each byte written or received.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) size = QUP_OUTPUT_BLOCK_SIZE(io_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) if (size >= ARRAY_SIZE(blk_sizes)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) qup->out_blk_sz = blk_sizes[size];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) size = QUP_INPUT_BLOCK_SIZE(io_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) if (size >= ARRAY_SIZE(blk_sizes)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) qup->in_blk_sz = blk_sizes[size];
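/*
 * Worked example (illustrative register value, not taken from a specific
 * SoC): if the IO_MODE block-size field decodes to index 1, then
 * blk_sizes[1] = 16, i.e. a raw 16-byte block; on QUP v1 this is halved
 * below to 8 bytes of actual data, since each entry also carries a tag.
 */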
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) if (is_qup_v1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) * In QUP v1, QUP_CONFIG uses N = 15, i.e. 16 bits constitute a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) * single transfer, but the block size is in bytes, so divide
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) * in_blk_sz and out_blk_sz by 2.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) qup->in_blk_sz /= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) qup->out_blk_sz /= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) qup->write_tx_fifo = qup_i2c_write_tx_fifo_v1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) qup->read_rx_fifo = qup_i2c_read_rx_fifo_v1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) qup->write_rx_tags = qup_i2c_write_rx_tags_v1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) qup->write_tx_fifo = qup_i2c_write_tx_fifo_v2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) qup->read_rx_fifo = qup_i2c_read_rx_fifo_v2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) qup->write_rx_tags = qup_i2c_write_rx_tags_v2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) size = QUP_OUTPUT_FIFO_SIZE(io_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) qup->out_fifo_sz = qup->out_blk_sz * (2 << size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) size = QUP_INPUT_FIFO_SIZE(io_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) qup->in_fifo_sz = qup->in_blk_sz * (2 << size);
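/*
 * Worked example (illustrative field value): with a 16-byte block and a
 * FIFO-size field of 2, the FIFO holds 16 * (2 << 2) = 128 bytes; the
 * multiplier is always a power of two, starting at 2 for a field of 0.
 */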
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) hs_div = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) if (clk_freq <= I2C_MAX_STANDARD_MODE_FREQ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) fs_div = ((src_clk_freq / clk_freq) / 2) - 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) qup->clk_ctl = (hs_div << 8) | (fs_div & 0xff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) /* 33%/66% duty cycle */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) fs_div = ((src_clk_freq / clk_freq) - 6) * 2 / 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) qup->clk_ctl = ((fs_div / 2) << 16) | (hs_div << 8) | (fs_div & 0xff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) }
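/*
 * Worked example, assuming a 19.2 MHz source clock purely for
 * illustration:
 *  - 100 kHz (standard mode): fs_div = (19200000 / 100000) / 2 - 3 = 93,
 *    so clk_ctl = (3 << 8) | 93 = 0x35d.
 *  - 400 kHz (fast mode): fs_div = ((19200000 / 400000) - 6) * 2 / 3 = 28,
 *    so clk_ctl = (14 << 16) | (3 << 8) | 28 = 0xe031c.
 */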
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) * Time it takes for a byte to be clocked out on the bus.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) * Each byte takes 9 clock cycles (8 bits + 1 ack).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) one_bit_t = (USEC_PER_SEC / clk_freq) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) qup->one_byte_t = one_bit_t * 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) qup->xfer_timeout = TOUT_MIN * HZ +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) usecs_to_jiffies(MX_DMA_TX_RX_LEN * qup->one_byte_t);
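/*
 * Worked example: at 100 kHz, one_bit_t = (1000000 / 100000) + 1 = 11 us
 * (the + 1 presumably guards against truncation by the integer division),
 * giving one_byte_t = 99 us; the transfer timeout is then the TOUT_MIN
 * floor plus the time needed for the largest possible DMA transfer.
 */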
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) dev_dbg(qup->dev, "IN:block:%d, fifo:%d, OUT:block:%d, fifo:%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) qup->in_blk_sz, qup->in_fifo_sz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) qup->out_blk_sz, qup->out_fifo_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) i2c_set_adapdata(&qup->adap, qup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) qup->adap.dev.parent = qup->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) qup->adap.dev.of_node = pdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) qup->is_last = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) strscpy(qup->adap.name, "QUP I2C adapter", sizeof(qup->adap.name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882)
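/*
 * Note: the autosuspend delay below is MSEC_PER_SEC, so the controller is
 * runtime-suspended roughly one second after the last transfer completes.
 */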
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) pm_runtime_set_autosuspend_delay(qup->dev, MSEC_PER_SEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) pm_runtime_use_autosuspend(qup->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) pm_runtime_set_active(qup->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) pm_runtime_enable(qup->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) ret = i2c_add_adapter(&qup->adap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) goto fail_runtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) fail_runtime:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) pm_runtime_disable(qup->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) pm_runtime_set_suspended(qup->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) qup_i2c_disable_clocks(qup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) fail_dma:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) if (qup->btx.dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) dma_release_channel(qup->btx.dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) if (qup->brx.dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) dma_release_channel(qup->brx.dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) static int qup_i2c_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) struct qup_i2c_dev *qup = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) if (qup->is_dma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) dma_release_channel(qup->btx.dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) dma_release_channel(qup->brx.dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) disable_irq(qup->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) qup_i2c_disable_clocks(qup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) i2c_del_adapter(&qup->adap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) pm_runtime_disable(qup->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) pm_runtime_set_suspended(qup->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) #ifdef CONFIG_PM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) static int qup_i2c_pm_suspend_runtime(struct device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) struct qup_i2c_dev *qup = dev_get_drvdata(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) dev_dbg(device, "pm_runtime: suspending...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) qup_i2c_disable_clocks(qup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) static int qup_i2c_pm_resume_runtime(struct device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) struct qup_i2c_dev *qup = dev_get_drvdata(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) dev_dbg(device, "pm_runtime: resuming...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) qup_i2c_enable_clocks(qup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) #ifdef CONFIG_PM_SLEEP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) static int qup_i2c_suspend(struct device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) if (!pm_runtime_suspended(device))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) return qup_i2c_pm_suspend_runtime(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) static int qup_i2c_resume(struct device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) qup_i2c_pm_resume_runtime(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) pm_runtime_mark_last_busy(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) pm_request_autosuspend(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) static const struct dev_pm_ops qup_i2c_qup_pm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) SET_SYSTEM_SLEEP_PM_OPS(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) qup_i2c_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) qup_i2c_resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) SET_RUNTIME_PM_OPS(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) qup_i2c_pm_suspend_runtime,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) qup_i2c_pm_resume_runtime,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) static const struct of_device_id qup_i2c_dt_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) { .compatible = "qcom,i2c-qup-v1.1.1" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) { .compatible = "qcom,i2c-qup-v2.1.1" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) { .compatible = "qcom,i2c-qup-v2.2.1" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) MODULE_DEVICE_TABLE(of, qup_i2c_dt_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) static struct platform_driver qup_i2c_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) .probe = qup_i2c_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) .remove = qup_i2c_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) .name = "i2c_qup",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) .pm = &qup_i2c_qup_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) .of_match_table = qup_i2c_dt_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) .acpi_match_table = ACPI_PTR(qup_i2c_acpi_match),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) module_platform_driver(qup_i2c_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) MODULE_LICENSE("GPL v2");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) MODULE_ALIAS("platform:i2c_qup");