// SPDX-License-Identifier: GPL-2.0
/*
 * i2c Support for Atmel's AT91 Two-Wire Interface (TWI)
 *
 * Copyright (C) 2011 Weinmann Medical GmbH
 * Author: Nikolaus Voss <n.voss@weinmann.de>
 *
 * Evolved from original work by:
 * Copyright (C) 2004 Rick Bronson
 * Converted to 2.6 by Andrew Victor <andrew@sanpeople.com>
 *
 * Borrowed heavily from original work by:
 * Copyright (C) 2000 Philip Edelbrock <phil@stimpy.netroedge.com>
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/platform_data/dma-atmel.h>
#include <linux/pm_runtime.h>

#include "i2c-at91.h"

void at91_init_twi_bus_master(struct at91_twi_dev *dev)
{
	struct at91_twi_pdata *pdata = dev->pdata;
	u32 filtr = 0;

	/* FIFO should be enabled immediately after the software reset */
	if (dev->fifo_size)
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_FIFOEN);
	at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_MSEN);
	at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_SVDIS);
	at91_twi_write(dev, AT91_TWI_CWGR, dev->twi_cwgr_reg);

	/* enable digital filter */
	if (pdata->has_dig_filtr && dev->enable_dig_filt)
		filtr |= AT91_TWI_FILTR_FILT;

	/* enable advanced digital filter */
	if (pdata->has_adv_dig_filtr && dev->enable_dig_filt)
		filtr |= AT91_TWI_FILTR_FILT |
			 (AT91_TWI_FILTR_THRES(dev->filter_width) &
			 AT91_TWI_FILTR_THRES_MASK);

	/* enable analog filter */
	if (pdata->has_ana_filtr && dev->enable_ana_filt)
		filtr |= AT91_TWI_FILTR_PADFEN;

	if (filtr)
		at91_twi_write(dev, AT91_TWI_FILTR, filtr);
}

/*
 * Calculate symmetric clock as stated in the datasheet:
 * twi_clk = F_MAIN / (2 * (cdiv * (1 << ckdiv) + offset))
 */
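/*
 * Worked example with purely illustrative numbers (not taken from any
 * datasheet): for a 66 MHz peripheral clock, a 100 kHz bus and a clock
 * offset of 4, div = DIV_ROUND_UP(66000000, 200000) - 4 = 326, so
 * ckdiv = fls(326 >> 8) = 1 and cdiv = 326 >> 1 = 163, which gives
 * twi_clk = 66 MHz / (2 * (163 * 2 + 4)) = 100 kHz.
 */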
static void at91_calc_twi_clock(struct at91_twi_dev *dev)
{
	int ckdiv, cdiv, div, hold = 0, filter_width = 0;
	struct at91_twi_pdata *pdata = dev->pdata;
	int offset = pdata->clk_offset;
	int max_ckdiv = pdata->clk_max_div;
	struct i2c_timings timings, *t = &timings;

	i2c_parse_fw_timings(dev->dev, t, true);

	div = max(0, (int)DIV_ROUND_UP(clk_get_rate(dev->clk),
				       2 * t->bus_freq_hz) - offset);
	ckdiv = fls(div >> 8);
	cdiv = div >> ckdiv;

	if (ckdiv > max_ckdiv) {
		dev_warn(dev->dev, "%d exceeds ckdiv max value which is %d.\n",
			 ckdiv, max_ckdiv);
		ckdiv = max_ckdiv;
		cdiv = 255;
	}

	if (pdata->has_hold_field) {
		/*
		 * hold time = HOLD + 3 x T_peripheral_clock
		 * Use clk rate in kHz to prevent overflows when computing
		 * hold.
		 */
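		/*
		 * Illustrative numbers only: with a 66 MHz peripheral clock
		 * and t->sda_hold_ns = 300, this gives
		 * hold = DIV_ROUND_UP(300 * 66000, 1000000) - 3 = 17,
		 * i.e. an effective hold time of (17 + 3) / 66 MHz ~= 303 ns.
		 */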
		hold = DIV_ROUND_UP(t->sda_hold_ns
				    * (clk_get_rate(dev->clk) / 1000), 1000000);
		hold -= 3;
		if (hold < 0)
			hold = 0;
		if (hold > AT91_TWI_CWGR_HOLD_MAX) {
			dev_warn(dev->dev,
				 "HOLD field set to its maximum value (%d instead of %d)\n",
				 AT91_TWI_CWGR_HOLD_MAX, hold);
			hold = AT91_TWI_CWGR_HOLD_MAX;
		}
	}

	if (pdata->has_adv_dig_filtr) {
		/*
		 * filter width = 0 to AT91_TWI_FILTR_THRES_MAX
		 * peripheral clocks
		 */
		filter_width = DIV_ROUND_UP(t->digital_filter_width_ns
				* (clk_get_rate(dev->clk) / 1000), 1000000);
		if (filter_width > AT91_TWI_FILTR_THRES_MAX) {
			dev_warn(dev->dev,
				 "Filter threshold set to its maximum value (%d instead of %d)\n",
				 AT91_TWI_FILTR_THRES_MAX, filter_width);
			filter_width = AT91_TWI_FILTR_THRES_MAX;
		}
	}

	dev->twi_cwgr_reg = (ckdiv << 16) | (cdiv << 8) | cdiv
			    | AT91_TWI_CWGR_HOLD(hold);

	dev->filter_width = filter_width;

	dev_dbg(dev->dev, "cdiv %d ckdiv %d hold %d (%d ns), filter_width %d (%d ns)\n",
		cdiv, ckdiv, hold, t->sda_hold_ns, filter_width,
		t->digital_filter_width_ns);
}

static void at91_twi_dma_cleanup(struct at91_twi_dev *dev)
{
	struct at91_twi_dma *dma = &dev->dma;

	at91_twi_irq_save(dev);

	if (dma->xfer_in_progress) {
		if (dma->direction == DMA_FROM_DEVICE)
			dmaengine_terminate_all(dma->chan_rx);
		else
			dmaengine_terminate_all(dma->chan_tx);
		dma->xfer_in_progress = false;
	}
	if (dma->buf_mapped) {
		dma_unmap_single(dev->dev, sg_dma_address(&dma->sg[0]),
				 dev->buf_len, dma->direction);
		dma->buf_mapped = false;
	}

	at91_twi_irq_restore(dev);
}

static void at91_twi_write_next_byte(struct at91_twi_dev *dev)
{
	if (!dev->buf_len)
		return;

	/* 8bit write works with and without FIFO */
	writeb_relaxed(*dev->buf, dev->base + AT91_TWI_THR);

	/* send stop when last byte has been written */
	if (--dev->buf_len == 0) {
		if (!dev->use_alt_cmd)
			at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
		at91_twi_write(dev, AT91_TWI_IDR, AT91_TWI_TXRDY);
	}

	dev_dbg(dev->dev, "wrote 0x%x, to go %zu\n", *dev->buf, dev->buf_len);

	++dev->buf;
}

static void at91_twi_write_data_dma_callback(void *data)
{
	struct at91_twi_dev *dev = (struct at91_twi_dev *)data;

	dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg[0]),
			 dev->buf_len, DMA_TO_DEVICE);

	/*
	 * When this callback is called, the THR/TX FIFO is likely not to be
	 * empty yet. So we have to wait for the TXCOMP or NACK bits to be set
	 * in the Status Register to be sure that the STOP bit has been sent
	 * and the transfer is completed. The NACK interrupt has already been
	 * enabled; we just have to enable the TXCOMP one.
	 */
	at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
	if (!dev->use_alt_cmd)
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
}

static void at91_twi_write_data_dma(struct at91_twi_dev *dev)
{
	dma_addr_t dma_addr;
	struct dma_async_tx_descriptor *txdesc;
	struct at91_twi_dma *dma = &dev->dma;
	struct dma_chan *chan_tx = dma->chan_tx;
	unsigned int sg_len = 1;

	if (!dev->buf_len)
		return;

	dma->direction = DMA_TO_DEVICE;

	at91_twi_irq_save(dev);
	dma_addr = dma_map_single(dev->dev, dev->buf, dev->buf_len,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev->dev, dma_addr)) {
		dev_err(dev->dev, "dma map failed\n");
		return;
	}
	dma->buf_mapped = true;
	at91_twi_irq_restore(dev);

	if (dev->fifo_size) {
		size_t part1_len, part2_len;
		struct scatterlist *sg;
		unsigned fifo_mr;

		sg_len = 0;

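		/*
		 * Split the buffer into a 4-byte-aligned part and its
		 * remainder: the aligned part can be moved with 32-bit FIFO
		 * accesses while the tail falls back to single-byte accesses
		 * (see the addr_width note in at91_twi_configure_dma()).
		 */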
		part1_len = dev->buf_len & ~0x3;
		if (part1_len) {
			sg = &dma->sg[sg_len++];
			sg_dma_len(sg) = part1_len;
			sg_dma_address(sg) = dma_addr;
		}

		part2_len = dev->buf_len & 0x3;
		if (part2_len) {
			sg = &dma->sg[sg_len++];
			sg_dma_len(sg) = part2_len;
			sg_dma_address(sg) = dma_addr + part1_len;
		}

		/*
		 * The DMA controller is triggered when at least 4 bytes of
		 * data can be written into the TX FIFO
		 */
		fifo_mr = at91_twi_read(dev, AT91_TWI_FMR);
		fifo_mr &= ~AT91_TWI_FMR_TXRDYM_MASK;
		fifo_mr |= AT91_TWI_FMR_TXRDYM(AT91_TWI_FOUR_DATA);
		at91_twi_write(dev, AT91_TWI_FMR, fifo_mr);
	} else {
		sg_dma_len(&dma->sg[0]) = dev->buf_len;
		sg_dma_address(&dma->sg[0]) = dma_addr;
	}

	txdesc = dmaengine_prep_slave_sg(chan_tx, dma->sg, sg_len,
					 DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txdesc) {
		dev_err(dev->dev, "dma prep slave sg failed\n");
		goto error;
	}

	txdesc->callback = at91_twi_write_data_dma_callback;
	txdesc->callback_param = dev;

	dma->xfer_in_progress = true;
	dmaengine_submit(txdesc);
	dma_async_issue_pending(chan_tx);

	return;

error:
	at91_twi_dma_cleanup(dev);
}

static void at91_twi_read_next_byte(struct at91_twi_dev *dev)
{
	/*
	 * If we are here with no bytes left to read, it means there is
	 * garbage data in RHR, so read it and discard it.
	 */
	if (!dev->buf_len) {
		at91_twi_read(dev, AT91_TWI_RHR);
		return;
	}

	/* 8bit read works with and without FIFO */
	*dev->buf = readb_relaxed(dev->base + AT91_TWI_RHR);
	--dev->buf_len;

	/* return if aborting, we only needed to read RHR to clear RXRDY */
	if (dev->recv_len_abort)
		return;

	/* handle I2C_SMBUS_BLOCK_DATA */
	if (unlikely(dev->msg->flags & I2C_M_RECV_LEN)) {
		/* ensure length byte is a valid value */
		if (*dev->buf <= I2C_SMBUS_BLOCK_MAX && *dev->buf > 0) {
			dev->msg->flags &= ~I2C_M_RECV_LEN;
			dev->buf_len += *dev->buf;
			dev->msg->len = dev->buf_len + 1;
			dev_dbg(dev->dev, "received block length %zu\n",
				dev->buf_len);
		} else {
			/* abort and send the stop by reading one more byte */
			dev->recv_len_abort = true;
			dev->buf_len = 1;
		}
	}

	/* send stop if the second-to-last byte has been read */
	if (!dev->use_alt_cmd && dev->buf_len == 1)
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);

	dev_dbg(dev->dev, "read 0x%x, to go %zu\n", *dev->buf, dev->buf_len);

	++dev->buf;
}

static void at91_twi_read_data_dma_callback(void *data)
{
	struct at91_twi_dev *dev = (struct at91_twi_dev *)data;
	unsigned ier = AT91_TWI_TXCOMP;

	dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg[0]),
			 dev->buf_len, DMA_FROM_DEVICE);

	if (!dev->use_alt_cmd) {
		/* The last two bytes have to be read without using dma */
		dev->buf += dev->buf_len - 2;
		dev->buf_len = 2;
		ier |= AT91_TWI_RXRDY;
	}
	at91_twi_write(dev, AT91_TWI_IER, ier);
}

static void at91_twi_read_data_dma(struct at91_twi_dev *dev)
{
	dma_addr_t dma_addr;
	struct dma_async_tx_descriptor *rxdesc;
	struct at91_twi_dma *dma = &dev->dma;
	struct dma_chan *chan_rx = dma->chan_rx;
	size_t buf_len;

	buf_len = (dev->use_alt_cmd) ? dev->buf_len : dev->buf_len - 2;
	dma->direction = DMA_FROM_DEVICE;

	/* Keep in mind that we won't use dma to read the last two bytes */
	at91_twi_irq_save(dev);
	dma_addr = dma_map_single(dev->dev, dev->buf, buf_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev->dev, dma_addr)) {
		dev_err(dev->dev, "dma map failed\n");
		return;
	}
	dma->buf_mapped = true;
	at91_twi_irq_restore(dev);

	if (dev->fifo_size && IS_ALIGNED(buf_len, 4)) {
		unsigned fifo_mr;

		/*
		 * The DMA controller is triggered when at least 4 bytes of
		 * data can be read from the RX FIFO
		 */
		fifo_mr = at91_twi_read(dev, AT91_TWI_FMR);
		fifo_mr &= ~AT91_TWI_FMR_RXRDYM_MASK;
		fifo_mr |= AT91_TWI_FMR_RXRDYM(AT91_TWI_FOUR_DATA);
		at91_twi_write(dev, AT91_TWI_FMR, fifo_mr);
	}

	sg_dma_len(&dma->sg[0]) = buf_len;
	sg_dma_address(&dma->sg[0]) = dma_addr;

	rxdesc = dmaengine_prep_slave_sg(chan_rx, dma->sg, 1, DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!rxdesc) {
		dev_err(dev->dev, "dma prep slave sg failed\n");
		goto error;
	}

	rxdesc->callback = at91_twi_read_data_dma_callback;
	rxdesc->callback_param = dev;

	dma->xfer_in_progress = true;
	dmaengine_submit(rxdesc);
	dma_async_issue_pending(dma->chan_rx);

	return;

error:
	at91_twi_dma_cleanup(dev);
}

static irqreturn_t atmel_twi_interrupt(int irq, void *dev_id)
{
	struct at91_twi_dev *dev = dev_id;
	const unsigned status = at91_twi_read(dev, AT91_TWI_SR);
	const unsigned irqstatus = status & at91_twi_read(dev, AT91_TWI_IMR);

	if (!irqstatus)
		return IRQ_NONE;
	/*
	 * In reception, the behavior of the twi device (before sama5d2) is
	 * weird. There is some magic about the RXRDY flag! When a byte has
	 * almost been received, the reception of a new one is anticipated if
	 * there is no stop command to send. That is the reason why we ask for
	 * sending the stop command not on the last byte but on the second to
	 * last one.
	 *
	 * Unfortunately, we could still have the RXRDY flag set even if the
	 * transfer is done and we have read the last byte. It might happen
	 * when the i2c slave device sends data too quickly after receiving
	 * the ack from the master. The data has almost been received before
	 * the order to send stop arrives. In this case, sending the stop
	 * command could cause an RXRDY interrupt together with a TXCOMP one.
	 * It is better to handle the RXRDY interrupt first in order not to
	 * keep garbage data in the Receive Holding Register for the next
	 * transfer.
	 */
	if (irqstatus & AT91_TWI_RXRDY) {
		/*
		 * Read all available bytes at once by polling RXRDY; this is
		 * usable both with and without the FIFO. With the FIFO
		 * enabled we could also read RXFL and avoid polling RXRDY.
		 */
		do {
			at91_twi_read_next_byte(dev);
		} while (at91_twi_read(dev, AT91_TWI_SR) & AT91_TWI_RXRDY);
	}

	/*
	 * When a NACK condition is detected, the I2C controller sets the NACK,
	 * TXCOMP and TXRDY bits all together in the Status Register (SR).
	 *
	 * 1 - Handling NACK errors with CPU write transfer.
	 *
	 * In such a case, we should not write the next byte into the Transmit
	 * Holding Register (THR), otherwise the I2C controller would start a
	 * new transfer and the I2C slave is likely to reply with another NACK.
	 *
	 * 2 - Handling NACK errors with DMA write transfer.
	 *
	 * By setting the TXRDY bit in the SR, the I2C controller also triggers
	 * the DMA controller to write the next data into the THR. Then the
	 * result depends on the hardware version of the I2C controller.
	 *
	 * 2a - Without support of the Alternative Command mode.
	 *
	 * This is the worst case: the DMA controller is triggered to write the
	 * next data into the THR, hence starting a new transfer: the I2C slave
	 * is likely to reply with another NACK.
	 * Concurrently, this interrupt handler is likely to be called to manage
	 * the first NACK before the I2C controller detects the second NACK and
	 * sets the NACK bit in the SR once again.
	 * When handling the first NACK, this interrupt handler disables the I2C
	 * controller interrupts, especially the NACK interrupt.
	 * Hence, the NACK bit remains pending in the SR. This is why we should
	 * read the SR to clear all pending interrupts at the beginning of
	 * at91_do_twi_transfer() before actually starting a new transfer.
	 *
	 * 2b - With support of the Alternative Command mode.
	 *
	 * When a NACK condition is detected, the I2C controller also locks the
	 * THR (and sets the LOCK bit in the SR): even though the DMA controller
	 * is triggered by the TXRDY bit to write the next data into the THR,
	 * this data actually won't go on the I2C bus, hence a second NACK is
	 * not generated.
	 */
	if (irqstatus & (AT91_TWI_TXCOMP | AT91_TWI_NACK)) {
		at91_disable_twi_interrupts(dev);
		complete(&dev->cmd_complete);
	} else if (irqstatus & AT91_TWI_TXRDY) {
		at91_twi_write_next_byte(dev);
	}

	/* catch error flags */
	dev->transfer_status |= status;

	return IRQ_HANDLED;
}

static int at91_do_twi_transfer(struct at91_twi_dev *dev)
{
	int ret;
	unsigned long time_left;
	bool has_unre_flag = dev->pdata->has_unre_flag;
	bool has_alt_cmd = dev->pdata->has_alt_cmd;

	/*
	 * WARNING: the TXCOMP bit in the Status Register is NOT a clear on
	 * read flag but shows the state of the transmission at the time the
	 * Status Register is read. According to the programmer's datasheet,
	 * TXCOMP is set when both the holding register and the internal
	 * shifter are empty and the STOP condition has been sent.
	 * Consequently, we should enable the NACK interrupt rather than TXCOMP
	 * to detect transmission failure.
	 * Indeed, let's take the case of an i2c write command using DMA.
	 * Whenever the slave doesn't acknowledge a byte, the LOCK, NACK and
	 * TXCOMP bits are set together in the Status Register.
	 * LOCK is a clear on write bit, which is set to prevent the DMA
	 * controller from sending new data on the i2c bus after a NACK
	 * condition has happened. Once locked, this i2c peripheral stops
	 * triggering the DMA controller for new data, but it is more than
	 * likely that a new DMA transaction is already in progress, writing
	 * into the Transmit Holding Register. Since the peripheral is locked,
	 * these new data won't be sent to the i2c bus but will remain in the
	 * Transmit Holding Register, so the TXCOMP bit is cleared.
	 * Then, when the interrupt handler is called, the Status Register is
	 * read: the TXCOMP bit is clear but the NACK bit is still set. The
	 * driver manages the error properly, without waiting for a timeout.
	 * This case can be reproduced easily when writing into an at24 eeprom.
	 *
	 * Besides, the TXCOMP bit is already set before the i2c transaction
	 * has been started. For read transactions, this bit is cleared when
	 * writing the START bit into the Control Register. So the
	 * corresponding interrupt can safely be enabled just after.
	 * However, for write transactions managed by the CPU, we first write
	 * into THR, so TXCOMP is cleared. Then we can safely enable the TXCOMP
	 * interrupt. If the TXCOMP interrupt were enabled before writing into
	 * THR, the interrupt handler would be called immediately and the i2c
	 * command would be reported as completed.
	 * Also, when a write transaction is managed by the DMA controller,
	 * enabling the TXCOMP interrupt in this function may lead to a race
	 * condition since we don't know whether the TXCOMP interrupt is
	 * enabled before or after the DMA has started to write into THR. So
	 * the TXCOMP interrupt is enabled later, by
	 * at91_twi_write_data_dma_callback().
	 * Immediately after, in that DMA callback, if the alternative command
	 * mode is not used, we still need to send the STOP condition manually
	 * by writing the corresponding bit into the Control Register.
	 */

	dev_dbg(dev->dev, "transfer: %s %zu bytes.\n",
		(dev->msg->flags & I2C_M_RD) ? "read" : "write", dev->buf_len);

	reinit_completion(&dev->cmd_complete);
	dev->transfer_status = 0;

	/* Clear pending interrupts, such as NACK. */
	at91_twi_read(dev, AT91_TWI_SR);

	if (dev->fifo_size) {
		unsigned fifo_mr = at91_twi_read(dev, AT91_TWI_FMR);

		/* Reset FIFO mode register */
		fifo_mr &= ~(AT91_TWI_FMR_TXRDYM_MASK |
			     AT91_TWI_FMR_RXRDYM_MASK);
		fifo_mr |= AT91_TWI_FMR_TXRDYM(AT91_TWI_ONE_DATA);
		fifo_mr |= AT91_TWI_FMR_RXRDYM(AT91_TWI_ONE_DATA);
		at91_twi_write(dev, AT91_TWI_FMR, fifo_mr);

		/* Flush FIFOs */
		at91_twi_write(dev, AT91_TWI_CR,
			       AT91_TWI_THRCLR | AT91_TWI_RHRCLR);
	}

	if (!dev->buf_len) {
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_QUICK);
		at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
	} else if (dev->msg->flags & I2C_M_RD) {
		unsigned start_flags = AT91_TWI_START;

		/* if only one byte is to be read, immediately stop transfer */
		if (!dev->use_alt_cmd && dev->buf_len <= 1 &&
		    !(dev->msg->flags & I2C_M_RECV_LEN))
			start_flags |= AT91_TWI_STOP;
		at91_twi_write(dev, AT91_TWI_CR, start_flags);
		/*
		 * When using dma without the alternative command mode, the
		 * last byte has to be read manually, in order not to send the
		 * stop command too late and then receive extra data.
		 * In practice, there are some issues if you use the dma to
		 * read n-1 bytes because of latency.
		 * Reading n-2 bytes with dma and the last two ones manually
		 * seems to be the best solution.
		 */
		if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
			at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_NACK);
			at91_twi_read_data_dma(dev);
		} else {
			at91_twi_write(dev, AT91_TWI_IER,
				       AT91_TWI_TXCOMP |
				       AT91_TWI_NACK |
				       AT91_TWI_RXRDY);
		}
	} else {
		if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
			at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_NACK);
			at91_twi_write_data_dma(dev);
		} else {
			at91_twi_write_next_byte(dev);
			at91_twi_write(dev, AT91_TWI_IER,
				       AT91_TWI_TXCOMP | AT91_TWI_NACK |
				       (dev->buf_len ? AT91_TWI_TXRDY : 0));
		}
	}

	time_left = wait_for_completion_timeout(&dev->cmd_complete,
						dev->adapter.timeout);
	if (time_left == 0) {
		dev->transfer_status |= at91_twi_read(dev, AT91_TWI_SR);
		dev_err(dev->dev, "controller timed out\n");
		at91_init_twi_bus(dev);
		ret = -ETIMEDOUT;
		goto error;
	}
	if (dev->transfer_status & AT91_TWI_NACK) {
		dev_dbg(dev->dev, "received nack\n");
		ret = -EREMOTEIO;
		goto error;
	}
	if (dev->transfer_status & AT91_TWI_OVRE) {
		dev_err(dev->dev, "overrun while reading\n");
		ret = -EIO;
		goto error;
	}
	if (has_unre_flag && dev->transfer_status & AT91_TWI_UNRE) {
		dev_err(dev->dev, "underrun while writing\n");
		ret = -EIO;
		goto error;
	}
	if ((has_alt_cmd || dev->fifo_size) &&
	    (dev->transfer_status & AT91_TWI_LOCK)) {
		dev_err(dev->dev, "tx locked\n");
		ret = -EIO;
		goto error;
	}
	if (dev->recv_len_abort) {
		dev_err(dev->dev, "invalid smbus block length recvd\n");
		ret = -EPROTO;
		goto error;
	}

	dev_dbg(dev->dev, "transfer complete\n");

	return 0;

error:
	/* first stop DMA transfer if still in progress */
	at91_twi_dma_cleanup(dev);
	/* then flush THR/FIFO and unlock TX if locked */
	if ((has_alt_cmd || dev->fifo_size) &&
	    (dev->transfer_status & AT91_TWI_LOCK)) {
		dev_dbg(dev->dev, "unlock tx\n");
		at91_twi_write(dev, AT91_TWI_CR,
			       AT91_TWI_THRCLR | AT91_TWI_LOCKCLR);
	}

	/*
	 * some faulty I2C slave devices might hold SDA down;
	 * we can send a bus clear command, hoping that the pins will be
	 * released
	 */
	i2c_recover_bus(&dev->adapter);

	return ret;
}

static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
{
	struct at91_twi_dev *dev = i2c_get_adapdata(adap);
	int ret;
	unsigned int_addr_flag = 0;
	struct i2c_msg *m_start = msg;
	bool is_read;

	dev_dbg(&adap->dev, "at91_xfer: processing %d messages:\n", num);

	ret = pm_runtime_get_sync(dev->dev);
	if (ret < 0)
		goto out;

	if (num == 2) {
		int internal_address = 0;
		int i;

		/* 1st msg is put into the internal address, start with 2nd */
		m_start = &msg[1];
		for (i = 0; i < msg->len; ++i) {
			const unsigned addr = msg->buf[msg->len - 1 - i];

			internal_address |= addr << (8 * i);
			int_addr_flag += AT91_TWI_IADRSZ_1;
		}
		at91_twi_write(dev, AT91_TWI_IADR, internal_address);
	}

	dev->use_alt_cmd = false;
	is_read = (m_start->flags & I2C_M_RD);
	if (dev->pdata->has_alt_cmd) {
		if (m_start->len > 0 &&
		    m_start->len < AT91_I2C_MAX_ALT_CMD_DATA_SIZE) {
			at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMEN);
			at91_twi_write(dev, AT91_TWI_ACR,
				       AT91_TWI_ACR_DATAL(m_start->len) |
				       ((is_read) ? AT91_TWI_ACR_DIR : 0));
			dev->use_alt_cmd = true;
		} else {
			at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMDIS);
		}
	}

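	/*
	 * Program the Master Mode Register: target address, internal address
	 * size (if any) and transfer direction. When the alternative command
	 * mode is used, the direction is taken from the ACR register instead,
	 * so MREAD is left clear here.
	 */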
	at91_twi_write(dev, AT91_TWI_MMR,
		       (m_start->addr << 16) |
		       int_addr_flag |
		       ((!dev->use_alt_cmd && is_read) ? AT91_TWI_MREAD : 0));

	dev->buf_len = m_start->len;
	dev->buf = m_start->buf;
	dev->msg = m_start;
	dev->recv_len_abort = false;

	ret = at91_do_twi_transfer(dev);

	ret = (ret < 0) ? ret : num;
out:
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return ret;
}

/*
 * The hardware can handle at most two messages concatenated by a
 * repeated start via its internal address feature.
 */
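/*
 * Illustrative example (client-side code, not part of this driver): the kind
 * of combined transfer these quirks allow is an EEPROM-style register read,
 * where a short write message carrying the internal address is followed by a
 * read message for the same target address:
 *
 *	u8 reg = 0x10, val;
 *	struct i2c_msg msgs[2] = {
 *		{ .addr = 0x50, .flags = 0, .len = 1, .buf = &reg },
 *		{ .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = &val },
 *	};
 *	ret = i2c_transfer(adapter, msgs, 2);
 *
 * at91_twi_xfer() then loads msgs[0] into the IADR register and performs a
 * single read transfer for msgs[1].
 */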
static const struct i2c_adapter_quirks at91_twi_quirks = {
	.flags = I2C_AQ_COMB | I2C_AQ_COMB_WRITE_FIRST | I2C_AQ_COMB_SAME_ADDR,
	.max_comb_1st_msg_len = 3,
};

static u32 at91_twi_func(struct i2c_adapter *adapter)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL
		| I2C_FUNC_SMBUS_READ_BLOCK_DATA;
}

static const struct i2c_algorithm at91_twi_algorithm = {
	.master_xfer = at91_twi_xfer,
	.functionality = at91_twi_func,
};

static int at91_twi_configure_dma(struct at91_twi_dev *dev, u32 phy_addr)
{
	int ret = 0;
	struct dma_slave_config slave_config;
	struct at91_twi_dma *dma = &dev->dma;
	enum dma_slave_buswidth addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) * The actual width of the access will be chosen in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) * dmaengine_prep_slave_sg():
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) * for each buffer in the scatter-gather list, if its size is aligned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) * to addr_width then addr_width accesses will be performed to transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) * the buffer. On the other hand, if the buffer size is not aligned to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) * addr_width then the buffer is transferred using single byte accesses.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) * Please refer to the Atmel eXtended DMA controller driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) * When FIFOs are used, the TXRDYM threshold can always be set to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) * trigger the XDMAC when at least 4 data can be written into the TX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) * FIFO, even if single byte accesses are performed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) * However, the RXRDYM threshold must be set to match the access width,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) * deduced from buffer length, so the XDMAC is triggered properly to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) * read data from the RX FIFO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) if (dev->fifo_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
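	/*
	 * Example: with a FIFO-equipped controller (addr_width = 4 bytes),
	 * a 16-byte buffer is moved with 32-bit accesses, whereas a 5-byte
	 * buffer falls back to single byte accesses because its length is
	 * not a multiple of the access width (behaviour described above,
	 * implemented by the DMA controller driver).
	 */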
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) memset(&slave_config, 0, sizeof(slave_config));
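	/*
	 * The Receive Holding Register (RHR) is the DMA source for reads,
	 * the Transmit Holding Register (THR) the destination for writes.
	 */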
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) slave_config.src_addr = (dma_addr_t)phy_addr + AT91_TWI_RHR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) slave_config.src_addr_width = addr_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) slave_config.src_maxburst = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) slave_config.dst_addr = (dma_addr_t)phy_addr + AT91_TWI_THR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) slave_config.dst_addr_width = addr_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) slave_config.dst_maxburst = 1;
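	/* the DMA controller, not the TWI peripheral, acts as flow controller */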
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) slave_config.device_fc = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) dma->chan_tx = dma_request_chan(dev->dev, "tx");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) if (IS_ERR(dma->chan_tx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) ret = PTR_ERR(dma->chan_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) dma->chan_tx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) dma->chan_rx = dma_request_chan(dev->dev, "rx");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) if (IS_ERR(dma->chan_rx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) ret = PTR_ERR(dma->chan_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) dma->chan_rx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) slave_config.direction = DMA_MEM_TO_DEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) if (dmaengine_slave_config(dma->chan_tx, &slave_config)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) dev_err(dev->dev, "failed to configure tx channel\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) slave_config.direction = DMA_DEV_TO_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) if (dmaengine_slave_config(dma->chan_rx, &slave_config)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) dev_err(dev->dev, "failed to configure rx channel\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) sg_init_table(dma->sg, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) dma->buf_mapped = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) dma->xfer_in_progress = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) dev->use_dma = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) dev_info(dev->dev, "using %s (tx) and %s (rx) for DMA transfers\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) if (ret != -EPROBE_DEFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) dev_info(dev->dev, "can't get DMA channel, continue without DMA support\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) if (dma->chan_rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) dma_release_channel(dma->chan_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) if (dma->chan_tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) dma_release_channel(dma->chan_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
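/*
 * Pinctrl/GPIO based recovery: only the pinctrl handle is stored here; the
 * I2C core combines it with the SCL/SDA GPIOs described in the device tree
 * to bit-bang the recovery sequence (see the generic recovery helpers in
 * the I2C core).
 */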
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) static int at91_init_twi_recovery_gpio(struct platform_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) struct at91_twi_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) struct i2c_bus_recovery_info *rinfo = &dev->rinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) rinfo->pinctrl = devm_pinctrl_get(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) if (!rinfo->pinctrl || IS_ERR(rinfo->pinctrl)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) dev_info(dev->dev, "can't get pinctrl, bus recovery not supported\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) return PTR_ERR(rinfo->pinctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) dev->adapter.bus_recovery_info = rinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) static int at91_twi_recover_bus_cmd(struct i2c_adapter *adap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) struct at91_twi_dev *dev = i2c_get_adapdata(adap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) dev->transfer_status |= at91_twi_read(dev, AT91_TWI_SR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) if (!(dev->transfer_status & AT91_TWI_SDA)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) dev_dbg(dev->dev, "SDA is down; sending bus clear command\n");
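		/*
		 * If the alternative command mode is in use, first zero the
		 * data length field of the Alternative Command Register,
		 * then issue the bus clear command.
		 */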
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) if (dev->use_alt_cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) unsigned int acr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) acr = at91_twi_read(dev, AT91_TWI_ACR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) acr &= ~AT91_TWI_ACR_DATAL_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) at91_twi_write(dev, AT91_TWI_ACR, acr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_CLEAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
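/*
 * Choose the recovery method: controllers that implement the bus CLEAR
 * command recover through the controller itself, older ones fall back to
 * pinctrl/GPIO based recovery.
 */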
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) static int at91_init_twi_recovery_info(struct platform_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) struct at91_twi_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) struct i2c_bus_recovery_info *rinfo = &dev->rinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) bool has_clear_cmd = dev->pdata->has_clear_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) if (!has_clear_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) return at91_init_twi_recovery_gpio(pdev, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) rinfo->recover_bus = at91_twi_recover_bus_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) dev->adapter.bus_recovery_info = rinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) int at91_twi_probe_master(struct platform_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) u32 phy_addr, struct at91_twi_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) init_completion(&dev->cmd_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) rc = devm_request_irq(&pdev->dev, dev->irq, atmel_twi_interrupt, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) dev_name(dev->dev), dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) dev_err(dev->dev, "Cannot get irq %d: %d\n", dev->irq, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
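	/*
	 * DMA channels are only described via the device tree; a deferral
	 * from the DMA provider must be propagated, any other error simply
	 * disables DMA support.
	 */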
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) if (dev->dev->of_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) rc = at91_twi_configure_dma(dev, phy_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) if (rc == -EPROBE_DEFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) if (!of_property_read_u32(pdev->dev.of_node, "atmel,fifo-size",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) &dev->fifo_size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) dev_info(dev->dev, "Using FIFO (%u data)\n", dev->fifo_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) dev->enable_dig_filt = of_property_read_bool(pdev->dev.of_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) "i2c-digital-filter");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) dev->enable_ana_filt = of_property_read_bool(pdev->dev.of_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) "i2c-analog-filter");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) at91_calc_twi_clock(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) rc = at91_init_twi_recovery_info(pdev, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) if (rc == -EPROBE_DEFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) dev->adapter.algo = &at91_twi_algorithm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) dev->adapter.quirks = &at91_twi_quirks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) }