Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for Orange Pi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
// SPDX-License-Identifier: GPL-2.0-only
/*
 * i2c-stm32.c
 *
 * Copyright (C) M'boumba Cedric Madianga 2017
 * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com>
 */

#include "i2c-stm32.h"

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) /* Functions for DMA support */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) struct stm32_i2c_dma *stm32_i2c_dma_request(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) 					    dma_addr_t phy_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) 					    u32 txdr_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) 					    u32 rxdr_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) 	struct stm32_i2c_dma *dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) 	struct dma_slave_config dma_sconfig;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) 	dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) 	if (!dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) 	/* Request and configure I2C TX dma channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) 	dma->chan_tx = dma_request_chan(dev, "tx");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) 	if (IS_ERR(dma->chan_tx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) 		ret = PTR_ERR(dma->chan_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) 		if (ret != -ENODEV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) 			ret = dev_err_probe(dev, ret,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) 					    "can't request DMA tx channel\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) 		goto fail_al;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) 	memset(&dma_sconfig, 0, sizeof(dma_sconfig));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) 	dma_sconfig.dst_addr = phy_addr + txdr_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) 	dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) 	dma_sconfig.dst_maxburst = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) 	dma_sconfig.direction = DMA_MEM_TO_DEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) 	ret = dmaengine_slave_config(dma->chan_tx, &dma_sconfig);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) 		dev_err(dev, "can't configure tx channel\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) 		goto fail_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) 	/* Request and configure I2C RX dma channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) 	dma->chan_rx = dma_request_chan(dev, "rx");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) 	if (IS_ERR(dma->chan_rx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) 		ret = PTR_ERR(dma->chan_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) 		if (ret != -ENODEV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) 			ret = dev_err_probe(dev, ret,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) 					    "can't request DMA rx channel\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) 		goto fail_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) 	memset(&dma_sconfig, 0, sizeof(dma_sconfig));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) 	dma_sconfig.src_addr = phy_addr + rxdr_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) 	dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) 	dma_sconfig.src_maxburst = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) 	dma_sconfig.direction = DMA_DEV_TO_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) 	ret = dmaengine_slave_config(dma->chan_rx, &dma_sconfig);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) 		dev_err(dev, "can't configure rx channel\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) 		goto fail_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) 	init_completion(&dma->dma_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) 	dev_info(dev, "using %s (tx) and %s (rx) for DMA transfers\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) 		 dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) 	return dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) fail_rx:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) 	dma_release_channel(dma->chan_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) fail_tx:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 	dma_release_channel(dma->chan_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) fail_al:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 	devm_kfree(dev, dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 	return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) void stm32_i2c_dma_free(struct stm32_i2c_dma *dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) 	dma->dma_buf = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) 	dma->dma_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) 	dma_release_channel(dma->chan_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) 	dma->chan_tx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) 	dma_release_channel(dma->chan_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) 	dma->chan_rx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) 	dma->chan_using = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) int stm32_i2c_prep_dma_xfer(struct device *dev, struct stm32_i2c_dma *dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) 			    bool rd_wr, u32 len, u8 *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) 			    dma_async_tx_callback callback,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 			    void *dma_async_param)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 	struct dma_async_tx_descriptor *txdesc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) 	struct device *chan_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 	if (rd_wr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) 		dma->chan_using = dma->chan_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) 		dma->dma_transfer_dir = DMA_DEV_TO_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) 		dma->dma_data_dir = DMA_FROM_DEVICE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) 		dma->chan_using = dma->chan_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 		dma->dma_transfer_dir = DMA_MEM_TO_DEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 		dma->dma_data_dir = DMA_TO_DEVICE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) 	dma->dma_len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 	chan_dev = dma->chan_using->device->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 	dma->dma_buf = dma_map_single(chan_dev, buf, dma->dma_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) 				      dma->dma_data_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) 	if (dma_mapping_error(chan_dev, dma->dma_buf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) 		dev_err(dev, "DMA mapping failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 	txdesc = dmaengine_prep_slave_single(dma->chan_using, dma->dma_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 					     dma->dma_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 					     dma->dma_transfer_dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) 					     DMA_PREP_INTERRUPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) 	if (!txdesc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) 		dev_err(dev, "Not able to get desc for DMA xfer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) 	reinit_completion(&dma->dma_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) 	txdesc->callback = callback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) 	txdesc->callback_param = dma_async_param;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) 	ret = dma_submit_error(dmaengine_submit(txdesc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 		dev_err(dev, "DMA submit failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 	dma_async_issue_pending(dma->chan_using);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 	dma_unmap_single(chan_dev, dma->dma_buf, dma->dma_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 			 dma->dma_data_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) }