Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Freescale MPC85xx, MPC83xx DMA Engine support
 *
 * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author:
 *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
 *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
 *
 * Description:
 *   DMA engine driver for the Freescale MPC8540 DMA controller, which is
 *   also suitable for the MPC8560, MPC8555, MPC8548, MPC8641, and others.
 *   Support for the MPC8349 DMA controller is also included.
 *
 * This driver instructs the DMA controller to issue the PCI Read Multiple
 * command for PCI read operations, instead of using the default PCI Read Line
 * command. Please be aware that this setting may result in read pre-fetching
 * on some platforms.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/fsldma.h>
#include "dmaengine.h"
#include "fsldma.h"

#define chan_dbg(chan, fmt, arg...)					\
	dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
#define chan_err(chan, fmt, arg...)					\
	dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)

static const char msg_ld_oom[] = "No free memory for link descriptor";

/*
 * Register Helpers
 */

static void set_sr(struct fsldma_chan *chan, u32 val)
{
	FSL_DMA_OUT(chan, &chan->regs->sr, val, 32);
}

static u32 get_sr(struct fsldma_chan *chan)
{
	return FSL_DMA_IN(chan, &chan->regs->sr, 32);
}

static void set_mr(struct fsldma_chan *chan, u32 val)
{
	FSL_DMA_OUT(chan, &chan->regs->mr, val, 32);
}

static u32 get_mr(struct fsldma_chan *chan)
{
	return FSL_DMA_IN(chan, &chan->regs->mr, 32);
}

static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
{
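	/* descriptors are 32-byte aligned, so the CDAR low bits can carry the SNEN (snoop enable) flag */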
	FSL_DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
}

static dma_addr_t get_cdar(struct fsldma_chan *chan)
{
	return FSL_DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
}

static void set_bcr(struct fsldma_chan *chan, u32 val)
{
	FSL_DMA_OUT(chan, &chan->regs->bcr, val, 32);
}

static u32 get_bcr(struct fsldma_chan *chan)
{
	return FSL_DMA_IN(chan, &chan->regs->bcr, 32);
}

/*
 * Descriptor Helpers
 */

static void set_desc_cnt(struct fsldma_chan *chan,
				struct fsl_dma_ld_hw *hw, u32 count)
{
	hw->count = CPU_TO_DMA(chan, count, 32);
}

static void set_desc_src(struct fsldma_chan *chan,
			 struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
	hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
}

static void set_desc_dst(struct fsldma_chan *chan,
			 struct fsl_dma_ld_hw *hw, dma_addr_t dst)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
	hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
}

static void set_desc_next(struct fsldma_chan *chan,
			  struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;
	hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64);
}

static void set_ld_eol(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;

	desc->hw.next_ln_addr = CPU_TO_DMA(chan,
		DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
			| snoop_bits, 64);
}

/*
 * DMA Engine Hardware Control Helpers
 */

static void dma_init(struct fsldma_chan *chan)
{
	/* Reset the channel */
	set_mr(chan, 0);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		/* Put the channel into the following modes:
		 * EIE - Error interrupt enable
		 * EOLNIE - End of links interrupt enable
		 * BWC - Bandwidth sharing among channels
		 */
		set_mr(chan, FSL_DMA_MR_BWC | FSL_DMA_MR_EIE
			| FSL_DMA_MR_EOLNIE);
		break;
	case FSL_DMA_IP_83XX:
		/* Put the channel into the following modes:
		 * EOTIE - End-of-transfer interrupt enable
		 * PRC_RM - PCI read multiple
		 */
		set_mr(chan, FSL_DMA_MR_EOTIE | FSL_DMA_MR_PRC_RM);
		break;
	}
}

static int dma_is_idle(struct fsldma_chan *chan)
{
	u32 sr = get_sr(chan);
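	/* idle when the channel busy (CB) bit is clear, or the channel has halted (CH) */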
	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
}

/*
 * Start the DMA controller
 *
 * Preconditions:
 * - the CDAR register must point to the start descriptor
 * - the MRn[CS] bit must be cleared
 */
static void dma_start(struct fsldma_chan *chan)
{
	u32 mode;

	mode = get_mr(chan);

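	/*
	 * External pause: zero the byte count register and enable external
	 * master pause, so the DREQ# pin can gate the transfer.
	 */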
	if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
		set_bcr(chan, 0);
		mode |= FSL_DMA_MR_EMP_EN;
	} else {
		mode &= ~FSL_DMA_MR_EMP_EN;
	}

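	/*
	 * With external start enabled, only arm the channel here and let
	 * the external start pin trigger the transfer; otherwise set CS to
	 * start the channel immediately in software.
	 */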
	if (chan->feature & FSL_DMA_CHAN_START_EXT) {
		mode |= FSL_DMA_MR_EMS_EN;
	} else {
		mode &= ~FSL_DMA_MR_EMS_EN;
		mode |= FSL_DMA_MR_CS;
	}

	set_mr(chan, mode);
}

static void dma_halt(struct fsldma_chan *chan)
{
	u32 mode;
	int i;

	/* read the mode register */
	mode = get_mr(chan);

	/*
	 * The 85xx controller supports channel abort, which will stop
	 * the current transfer. On 83xx, this bit is the transfer error
	 * mask bit, which should not be changed.
	 */
	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		mode |= FSL_DMA_MR_CA;
		set_mr(chan, mode);

		mode &= ~FSL_DMA_MR_CA;
	}

	/* stop the DMA controller */
	mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN);
	set_mr(chan, mode);

	/* wait for the DMA controller to become idle */
	for (i = 0; i < 100; i++) {
		if (dma_is_idle(chan))
			return;

		udelay(10);
	}

	if (!dma_is_idle(chan))
		chan_err(chan, "DMA halt timeout!\n");
}

/**
 * fsl_chan_set_src_loop_size - Set source address hold transfer size
 * @chan : Freescale DMA channel
 * @size     : Address loop size, 0 to disable the loop
 *
 * Set the source address hold (loop) transfer size. While the DMA
 * transfers data from the source address (SA), a loop size of 4 makes
 * the DMA read from SA, SA + 1, SA + 2, SA + 3, then loop back to SA,
 * SA + 1 ... and so on.
 */
static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size)
{
	u32 mode;

	mode = get_mr(chan);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_SAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
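		/* encode log2(size) into the SAHTS field and enable source address hold */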
		mode &= ~FSL_DMA_MR_SAHTS_MASK;
		mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14);
		break;
	}

	set_mr(chan, mode);
}

/**
 * fsl_chan_set_dst_loop_size - Set destination address hold transfer size
 * @chan : Freescale DMA channel
 * @size     : Address loop size, 0 to disable the loop
 *
 * Set the destination address hold (loop) transfer size. While the DMA
 * transfers data to the destination address (TA), a loop size of 4 makes
 * the DMA write to TA, TA + 1, TA + 2, TA + 3, then loop back to TA,
 * TA + 1 ... and so on.
 */
static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size)
{
	u32 mode;

	mode = get_mr(chan);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_DAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
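		/* encode log2(size) into the DAHTS field and enable destination address hold */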
		mode &= ~FSL_DMA_MR_DAHTS_MASK;
		mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16);
		break;
	}

	set_mr(chan, mode);
}

/**
 * fsl_chan_set_request_count - Set DMA Request Count for external control
 * @chan : Freescale DMA channel
 * @size     : Number of bytes to transfer in a single request
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA request count is how many bytes are allowed to transfer before
 * pausing the channel, after which a new assertion of DREQ# resumes channel
 * operation.
 *
 * A size of 0 disables external pause control. The maximum size is 1024.
 */
static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size)
{
	u32 mode;

	BUG_ON(size > 1024);

	mode = get_mr(chan);
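	/* encode log2(size) into the BWC field of the mode register */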
	mode &= ~FSL_DMA_MR_BWC_MASK;
	mode |= (__ilog2(size) << 24) & FSL_DMA_MR_BWC_MASK;

	set_mr(chan, mode);
}

/**
 * fsl_chan_toggle_ext_pause - Toggle channel external pause status
 * @chan : Freescale DMA channel
 * @enable   : 0 is disabled, 1 is enabled.
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA Request Count feature should be used in addition to this feature
 * to set the number of bytes to transfer before pausing the channel.
 */
static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable)
{
	if (enable)
		chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
}

/**
 * fsl_chan_toggle_ext_start - Toggle channel external start status
 * @chan : Freescale DMA channel
 * @enable   : 0 is disabled, 1 is enabled.
 *
 * If external start is enabled, the channel can be started by an
 * external DMA start pin, so dma_start() does not begin the transfer
 * immediately. The DMA channel will wait until the control pin is
 * asserted.
 */
static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable)
{
	if (enable)
		chan->feature |= FSL_DMA_CHAN_START_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_START_EXT;
}

int fsl_dma_external_start(struct dma_chan *dchan, int enable)
{
	struct fsldma_chan *chan;

	if (!dchan)
		return -EINVAL;

	chan = to_fsl_chan(dchan);

	fsl_chan_toggle_ext_start(chan, enable);
	return 0;
}
EXPORT_SYMBOL_GPL(fsl_dma_external_start);

static void append_ld_queue(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
	struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev);

	if (list_empty(&chan->ld_pending))
		goto out_splice;

	/*
	 * Add the hardware descriptor to the chain of hardware descriptors
	 * that already exists in memory.
	 *
	 * This will un-set the EOL bit of the existing transaction, and the
	 * last link in this transaction will become the EOL descriptor.
	 */
	set_desc_next(chan, &tail->hw, desc->async_tx.phys);

	/*
	 * Add the software descriptor and all children to the list
	 * of pending transactions
	 */
out_splice:
	list_splice_tail_init(&desc->tx_list, &chan->ld_pending);
}

static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct fsldma_chan *chan = to_fsl_chan(tx->chan);
	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
	struct fsl_desc_sw *child;
	dma_cookie_t cookie = -EINVAL;

	spin_lock_bh(&chan->desc_lock);

#ifdef CONFIG_PM
	if (unlikely(chan->pm_state != RUNNING)) {
		chan_dbg(chan, "cannot submit due to suspend\n");
		spin_unlock_bh(&chan->desc_lock);
		return -1;
	}
#endif

	/*
	 * assign cookies to all of the software descriptors
	 * that make up this transaction
	 */
	list_for_each_entry(child, &desc->tx_list, node) {
		cookie = dma_cookie_assign(&child->async_tx);
	}

	/* put this transaction onto the tail of the pending queue */
	append_ld_queue(chan, desc);

	spin_unlock_bh(&chan->desc_lock);

	return cookie;
}

/**
 * fsl_dma_free_descriptor - Free descriptor from channel's DMA pool.
 * @chan : Freescale DMA channel
 * @desc: descriptor to be freed
 */
static void fsl_dma_free_descriptor(struct fsldma_chan *chan,
		struct fsl_desc_sw *desc)
{
	list_del(&desc->node);
	chan_dbg(chan, "LD %p free\n", desc);
	dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
}

/**
 * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool.
 * @chan : Freescale DMA channel
 *
 * Return - The allocated descriptor; NULL on failure.
 */
static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;
	dma_addr_t pdesc;

	desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (!desc) {
		chan_dbg(chan, "out of memory for link descriptor\n");
		return NULL;
	}

	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = fsl_dma_tx_submit;
	desc->async_tx.phys = pdesc;

	chan_dbg(chan, "LD %p allocated\n", desc);

	return desc;
}

/**
 * fsldma_clean_completed_descriptor - free all descriptors which
 * have been completed and acked
 * @chan: Freescale DMA channel
 *
 * This function is used on all completed and acked descriptors.
 * All descriptors should only be freed in this function.
 */
static void fsldma_clean_completed_descriptor(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc, *_desc;

	/* Free each completed descriptor that has been acked, in order */
	list_for_each_entry_safe(desc, _desc, &chan->ld_completed, node)
		if (async_tx_test_ack(&desc->async_tx))
			fsl_dma_free_descriptor(chan, desc);
}

/**
 * fsldma_run_tx_complete_actions - cleanup a single link descriptor
 * @chan: Freescale DMA channel
 * @desc: descriptor to cleanup and free
 * @cookie: Freescale DMA transaction identifier
 *
 * This function is used on a descriptor which has been executed by the DMA
 * controller. It will run any callbacks and submit any dependencies.
 */
static dma_cookie_t fsldma_run_tx_complete_actions(struct fsldma_chan *chan,
		struct fsl_desc_sw *desc, dma_cookie_t cookie)
{
	struct dma_async_tx_descriptor *txd = &desc->async_tx;
	dma_cookie_t ret = cookie;

	BUG_ON(txd->cookie < 0);

	if (txd->cookie > 0) {
		ret = txd->cookie;

		dma_descriptor_unmap(txd);
		/* Run the link descriptor callback function */
		dmaengine_desc_get_callback_invoke(txd, NULL);
	}

	/* Run any dependencies */
	dma_run_dependencies(txd);

	return ret;
}

/**
 * fsldma_clean_running_descriptor - move the completed descriptor from
 * ld_running to ld_completed
 * @chan: Freescale DMA channel
 * @desc: the descriptor which is completed
 *
 * Free the descriptor directly if it has been acked by the async_tx API,
 * otherwise move it to the ld_completed queue.
 */
static void fsldma_clean_running_descriptor(struct fsldma_chan *chan,
		struct fsl_desc_sw *desc)
{
	/* Remove from the list of transactions */
	list_del(&desc->node);

	/*
	 * the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx)) {
		/*
		 * Move this descriptor to the list of descriptors which have
		 * completed, but are still awaiting the 'ack' bit to be set.
		 */
		list_add_tail(&desc->node, &chan->ld_completed);
		return;
	}

	dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
}

/**
 * fsl_chan_xfer_ld_queue - transfer any pending transactions
 * @chan : Freescale DMA channel
 *
 * HARDWARE STATE: idle
 * LOCKING: must hold chan->desc_lock
 */
static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;

	/*
	 * If the list of pending descriptors is empty, then we
	 * don't need to do any work at all
	 */
	if (list_empty(&chan->ld_pending)) {
		chan_dbg(chan, "no pending LDs\n");
		return;
	}

	/*
	 * The DMA controller is not idle, which means that the interrupt
	 * handler will start any queued transactions when it runs after
	 * this transaction finishes
	 */
	if (!chan->idle) {
		chan_dbg(chan, "DMA controller still busy\n");
		return;
	}

	/*
	 * If there are some link descriptors which have not been
	 * transferred, we need to start the controller
	 */

	/*
	 * Move all elements from the queue of pending transactions
	 * onto the list of running transactions
	 */
	chan_dbg(chan, "idle, starting controller\n");
	desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
	list_splice_tail_init(&chan->ld_pending, &chan->ld_running);

	/*
	 * The 85xx DMA controller doesn't clear the channel start bit
	 * automatically at the end of a transfer. Therefore we must clear
	 * it in software before starting the transfer.
	 */
	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		u32 mode;

		mode = get_mr(chan);
		mode &= ~FSL_DMA_MR_CS;
		set_mr(chan, mode);
	}

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	set_cdar(chan, desc->async_tx.phys);
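	/* dummy read-back, presumably to ensure the CDAR write has posted before the channel starts */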
	get_cdar(chan);

	dma_start(chan);
	chan->idle = false;
}

/**
 * fsldma_cleanup_descriptors - cleanup link descriptors which have completed,
 * moving them to ld_completed until the 'ack' flag is set and they can be freed
 * @chan: Freescale DMA channel
 *
 * This function is used on descriptors which have been executed by the DMA
 * controller. It will run any callbacks, submit any dependencies, then
 * free these descriptors if flag 'ack' is set.
 */
static void fsldma_cleanup_descriptors(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc, *_desc;
	dma_cookie_t cookie = 0;
	dma_addr_t curr_phys = get_cdar(chan);
	int seen_current = 0;

	fsldma_clean_completed_descriptor(chan);

	/* Run the callback for each descriptor, in order */
	list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) {
		/*
		 * do not advance past the current descriptor loaded into the
		 * hardware channel, subsequent descriptors are either in
		 * process or have not been submitted
		 */
		if (seen_current)
			break;

		/*
		 * stop the search if we reach the current descriptor and the
		 * channel is busy
		 */
		if (desc->async_tx.phys == curr_phys) {
			seen_current = 1;
			if (!dma_is_idle(chan))
				break;
		}

		cookie = fsldma_run_tx_complete_actions(chan, desc, cookie);

		fsldma_clean_running_descriptor(chan, desc);
	}

	/*
	 * Start any pending transactions automatically
	 *
	 * In the ideal case, we keep the DMA controller busy while we go
	 * ahead and free the descriptors below.
	 */
	fsl_chan_xfer_ld_queue(chan);

	if (cookie > 0)
		chan->common.completed_cookie = cookie;
}

/**
 * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
 * @chan : Freescale DMA channel
 *
 * This function will create a dma pool for descriptor allocation.
 *
 * Return - The number of descriptors allocated.
 */
static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 1;

	/*
	 * The descriptor must be aligned to 32 bytes to meet the
	 * FSL DMA specification requirement.
	 */
	chan->desc_pool = dma_pool_create(chan->name, chan->dev,
					  sizeof(struct fsl_desc_sw),
					  __alignof__(struct fsl_desc_sw), 0);
	if (!chan->desc_pool) {
		chan_err(chan, "unable to allocate descriptor pool\n");
		return -ENOMEM;
	}

	/* there is at least one descriptor free to be allocated */
	return 1;
}

/**
 * fsldma_free_desc_list - Free all descriptors in a queue
 * @chan: Freescale DMA channel
 * @list: the list to free
 *
 * LOCKING: must hold chan->desc_lock
 */
static void fsldma_free_desc_list(struct fsldma_chan *chan,
				  struct list_head *list)
{
	struct fsl_desc_sw *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, list, node)
		fsl_dma_free_descriptor(chan, desc);
}

static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
					  struct list_head *list)
{
	struct fsl_desc_sw *desc, *_desc;

	list_for_each_entry_safe_reverse(desc, _desc, list, node)
		fsl_dma_free_descriptor(chan, desc);
}

/**
 * fsl_dma_free_chan_resources - Free all resources of the channel.
 * @chan : Freescale DMA channel
 */
static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);

	chan_dbg(chan, "free all channel resources\n");
	spin_lock_bh(&chan->desc_lock);
	fsldma_cleanup_descriptors(chan);
	fsldma_free_desc_list(chan, &chan->ld_pending);
	fsldma_free_desc_list(chan, &chan->ld_running);
	fsldma_free_desc_list(chan, &chan->ld_completed);
	spin_unlock_bh(&chan->desc_lock);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
}

static struct dma_async_tx_descriptor *
fsl_dma_prep_memcpy(struct dma_chan *dchan,
	dma_addr_t dma_dst, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy;

	if (!dchan)
		return NULL;

	if (!len)
		return NULL;

	chan = to_fsl_chan(dchan);

	do {

		/* Allocate the link descriptor from DMA pool */
		new = fsl_dma_alloc_descriptor(chan);
		if (!new) {
			chan_err(chan, "%s\n", msg_ld_oom);
			goto fail;
		}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 		copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 		set_desc_cnt(chan, &new->hw, copy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 		set_desc_src(chan, &new->hw, dma_src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 		set_desc_dst(chan, &new->hw, dma_dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 		if (!first)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 			first = new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 			set_desc_next(chan, &prev->hw, new->async_tx.phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 		new->async_tx.cookie = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 		async_tx_ack(&new->async_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 		prev = new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 		len -= copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 		dma_src += copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 		dma_dst += copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 		/* Insert the link descriptor to the LD ring */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 		list_add_tail(&new->node, &first->tx_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	} while (len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	new->async_tx.flags = flags; /* client is in control of this ack */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	new->async_tx.cookie = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	/* Set End-of-link to the last link descriptor of new list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	set_ld_eol(chan, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	return &first->async_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	if (!first)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	fsldma_free_desc_list_reverse(chan, &first->tx_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 
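/*
 * Usage sketch (illustrative only, not compiled as part of this driver):
 * a dmaengine client reaches fsl_dma_prep_memcpy() through the generic
 * dmaengine API roughly as below. The variables are placeholders and the
 * error handling is abbreviated.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_chan_by_mask(&mask);
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len,
 *				       DMA_PREP_INTERRUPT);
 *	if (!tx) {
 *		dma_release_channel(chan);
 *		return -ENOMEM;
 *	}
 *
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 *
 * dma_async_issue_pending() ends up in fsl_dma_memcpy_issue_pending()
 * below, which kicks the hardware.
 */
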
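/**
 * fsl_dma_device_terminate_all - Abort all pending and running transfers
 * @dchan: Freescale DMA channel
 *
 * Halts the channel and releases every descriptor still queued; clients
 * normally reach this through dmaengine_terminate_sync() or its variants.
 */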
static int fsl_dma_device_terminate_all(struct dma_chan *dchan)
{
	struct fsldma_chan *chan;

	if (!dchan)
		return -EINVAL;

	chan = to_fsl_chan(dchan);

	spin_lock_bh(&chan->desc_lock);

	/* Halt the DMA engine */
	dma_halt(chan);

	/* Remove and free all of the descriptors in the LD queue */
	fsldma_free_desc_list(chan, &chan->ld_pending);
	fsldma_free_desc_list(chan, &chan->ld_running);
	fsldma_free_desc_list(chan, &chan->ld_completed);
	chan->idle = true;

	spin_unlock_bh(&chan->desc_lock);
	return 0;
}

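/**
 * fsl_dma_device_config - Configure the channel for slave transfers
 * @dchan: Freescale DMA channel
 * @config: dmaengine slave channel runtime configuration
 *
 * Programs the controller request count from the configured bus width
 * and maximum burst length.
 */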
static int fsl_dma_device_config(struct dma_chan *dchan,
				 struct dma_slave_config *config)
{
	struct fsldma_chan *chan;
	int size;

	if (!dchan)
		return -EINVAL;

	chan = to_fsl_chan(dchan);

	/* make sure the channel supports setting burst size */
	if (!chan->set_request_count)
		return -ENXIO;

	/* we set the controller burst size depending on direction */
	if (config->direction == DMA_MEM_TO_DEV)
		size = config->dst_addr_width * config->dst_maxburst;
	else
		size = config->src_addr_width * config->src_maxburst;

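	/*
	 * Example: a 4-byte (32-bit) bus width with a maxburst of 8 beats
	 * yields a request count of 32 bytes per hardware request.
	 */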
	chan->set_request_count(chan, size);
	return 0;
}

/**
 * fsl_dma_memcpy_issue_pending - Issue the DMA start command
 * @dchan: Freescale DMA channel
 */
static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);

	spin_lock_bh(&chan->desc_lock);
	fsl_chan_xfer_ld_queue(chan);
	spin_unlock_bh(&chan->desc_lock);
}

/**
 * fsl_tx_status - Determine the DMA status
 * @dchan: Freescale DMA channel
 * @cookie: transaction identifier
 * @txstate: transaction state
 */
static enum dma_status fsl_tx_status(struct dma_chan *dchan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	enum dma_status ret;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

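	/*
	 * Not complete yet: run descriptor cleanup to advance the last
	 * completed cookie, then query the cookie state again.
	 */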
	spin_lock_bh(&chan->desc_lock);
	fsldma_cleanup_descriptors(chan);
	spin_unlock_bh(&chan->desc_lock);

	return dma_cookie_status(dchan, cookie, txstate);
}

/*----------------------------------------------------------------------------*/
/* Interrupt Handling                                                         */
/*----------------------------------------------------------------------------*/

static irqreturn_t fsldma_chan_irq(int irq, void *data)
{
	struct fsldma_chan *chan = data;
	u32 stat;

	/* save and clear the status register */
	stat = get_sr(chan);
	set_sr(chan, stat);
	chan_dbg(chan, "irq: stat = 0x%x\n", stat);

	/* check that this was really our device */
	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
	if (!stat)
		return IRQ_NONE;

	if (stat & FSL_DMA_SR_TE)
		chan_err(chan, "Transfer Error!\n");

	/*
	 * Programming Error
	 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
	 * trigger a PE interrupt.
	 */
	if (stat & FSL_DMA_SR_PE) {
		chan_dbg(chan, "irq: Programming Error INT\n");
		stat &= ~FSL_DMA_SR_PE;
		if (get_bcr(chan) != 0)
			chan_err(chan, "Programming Error!\n");
	}

	/*
	 * On the MPC8349, an EOCDI event requires updating the cookie
	 * and starting the next transfer if one exists.
	 */
	if (stat & FSL_DMA_SR_EOCDI) {
		chan_dbg(chan, "irq: End-of-Chain link INT\n");
		stat &= ~FSL_DMA_SR_EOCDI;
	}

	/*
	 * If the current transfer is the end-of-transfer, we should
	 * clear the Channel Start bit to prepare for the next transfer.
	 */
	if (stat & FSL_DMA_SR_EOLNI) {
		chan_dbg(chan, "irq: End-of-link INT\n");
		stat &= ~FSL_DMA_SR_EOLNI;
	}

	/* check that the DMA controller is really idle */
	if (!dma_is_idle(chan))
		chan_err(chan, "irq: controller not idle!\n");

	/* check that we handled all of the bits */
	if (stat)
		chan_err(chan, "irq: unhandled sr 0x%08x\n", stat);

	/*
	 * Schedule the tasklet to handle all cleanup of the current
	 * transaction. It will start a new transaction if there is
	 * one pending.
	 */
	tasklet_schedule(&chan->tasklet);
	chan_dbg(chan, "irq: Exit\n");
	return IRQ_HANDLED;
}

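/*
 * Deferred half of the channel interrupt: scheduled by fsldma_chan_irq()
 * so that descriptor cleanup runs outside hard-IRQ context.
 */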
static void dma_do_tasklet(struct tasklet_struct *t)
{
	struct fsldma_chan *chan = from_tasklet(chan, t, tasklet);

	chan_dbg(chan, "tasklet entry\n");

	spin_lock(&chan->desc_lock);

	/* the hardware is now idle and ready for more */
	chan->idle = true;

	/* Run all cleanup for descriptors which have been completed */
	fsldma_cleanup_descriptors(chan);

	spin_unlock(&chan->desc_lock);

	chan_dbg(chan, "tasklet exit\n");
}

static irqreturn_t fsldma_ctrl_irq(int irq, void *data)
{
	struct fsldma_device *fdev = data;
	struct fsldma_chan *chan;
	unsigned int handled = 0;
	u32 gsr, mask;
	int i;

	gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->regs)
						   : in_le32(fdev->regs);
	mask = 0xff000000;
	dev_dbg(fdev->dev, "IRQ: gsr 0x%.8x\n", gsr);

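	/*
	 * The general status register carries one status byte per channel,
	 * with channel 0 in the most significant byte; walk the mask down
	 * one byte at a time and dispatch to the owning channel.
	 */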
	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (gsr & mask) {
			dev_dbg(fdev->dev, "IRQ: chan %d\n", chan->id);
			fsldma_chan_irq(irq, chan);
			handled++;
		}

		gsr &= ~mask;
		mask >>= 8;
	}

	return IRQ_RETVAL(handled);
}

static void fsldma_free_irqs(struct fsldma_device *fdev)
{
	struct fsldma_chan *chan;
	int i;

	if (fdev->irq) {
		dev_dbg(fdev->dev, "free per-controller IRQ\n");
		free_irq(fdev->irq, fdev);
		return;
	}

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (chan && chan->irq) {
			chan_dbg(chan, "free per-channel IRQ\n");
			free_irq(chan->irq, chan);
		}
	}
}

static int fsldma_request_irqs(struct fsldma_device *fdev)
{
	struct fsldma_chan *chan;
	int ret;
	int i;

	/* if we have a per-controller IRQ, use that */
	if (fdev->irq) {
		dev_dbg(fdev->dev, "request per-controller IRQ\n");
		ret = request_irq(fdev->irq, fsldma_ctrl_irq, IRQF_SHARED,
				  "fsldma-controller", fdev);
		return ret;
	}

	/* no per-controller IRQ, use the per-channel IRQs */
	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (!chan->irq) {
			chan_err(chan, "interrupts property missing in device tree\n");
			ret = -ENODEV;
			goto out_unwind;
		}

		chan_dbg(chan, "request per-channel IRQ\n");
		ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED,
				  "fsldma-chan", chan);
		if (ret) {
			chan_err(chan, "unable to request per-channel IRQ\n");
			goto out_unwind;
		}
	}

	return 0;

out_unwind:
	/*
	 * Free only the IRQs that were actually requested; the channel at
	 * index i failed (or had no IRQ mapped) and must be skipped.
	 */
	for (i--; i >= 0; i--) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (!chan->irq)
			continue;

		free_irq(chan->irq, chan);
	}

	return ret;
}

/*----------------------------------------------------------------------------*/
/* OpenFirmware Subsystem                                                     */
/*----------------------------------------------------------------------------*/

static int fsl_dma_chan_probe(struct fsldma_device *fdev,
	struct device_node *node, u32 feature, const char *compatible)
{
	struct fsldma_chan *chan;
	struct resource res;
	int err;

	/* alloc channel */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan) {
		err = -ENOMEM;
		goto out_return;
	}

	/* ioremap registers for use */
	chan->regs = of_iomap(node, 0);
	if (!chan->regs) {
		dev_err(fdev->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free_chan;
	}

	err = of_address_to_resource(node, 0, &res);
	if (err) {
		dev_err(fdev->dev, "unable to find 'reg' property\n");
		goto out_iounmap_regs;
	}

	chan->feature = feature;
	if (!fdev->feature)
		fdev->feature = chan->feature;

	/*
	 * If the DMA device's feature is different from the feature
	 * of its channels, report the bug
	 */
	WARN_ON(fdev->feature != chan->feature);

	chan->dev = fdev->dev;
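	/*
	 * Derive the channel index from the offset of the channel's
	 * register block within the controller's register window.
	 */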
	chan->id = (res.start & 0xfff) < 0x300 ?
		   ((res.start - 0x100) & 0xfff) >> 7 :
		   ((res.start - 0x200) & 0xfff) >> 7;
	if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
		dev_err(fdev->dev, "too many channels for device\n");
		err = -EINVAL;
		goto out_iounmap_regs;
	}

	fdev->chan[chan->id] = chan;
	tasklet_setup(&chan->tasklet, dma_do_tasklet);
	snprintf(chan->name, sizeof(chan->name), "chan%d", chan->id);

	/* Initialize the channel */
	dma_init(chan);

	/* Clear the CDAR register */
	set_cdar(chan, 0);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
		fallthrough;
	case FSL_DMA_IP_83XX:
		chan->toggle_ext_start = fsl_chan_toggle_ext_start;
		chan->set_src_loop_size = fsl_chan_set_src_loop_size;
		chan->set_dst_loop_size = fsl_chan_set_dst_loop_size;
		chan->set_request_count = fsl_chan_set_request_count;
	}

	spin_lock_init(&chan->desc_lock);
	INIT_LIST_HEAD(&chan->ld_pending);
	INIT_LIST_HEAD(&chan->ld_running);
	INIT_LIST_HEAD(&chan->ld_completed);
	chan->idle = true;
#ifdef CONFIG_PM
	chan->pm_state = RUNNING;
#endif

	chan->common.device = &fdev->common;
	dma_cookie_init(&chan->common);

	/* find the IRQ line, if it exists in the device tree */
	chan->irq = irq_of_parse_and_map(node, 0);

	/* Add the channel to the DMA device channel list */
	list_add_tail(&chan->common.device_node, &fdev->common.channels);

	dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible,
		 chan->irq ? chan->irq : fdev->irq);

	return 0;

out_iounmap_regs:
	iounmap(chan->regs);
out_free_chan:
	kfree(chan);
out_return:
	return err;
}

static void fsl_dma_chan_remove(struct fsldma_chan *chan)
{
	irq_dispose_mapping(chan->irq);
	list_del(&chan->common.device_node);
	iounmap(chan->regs);
	kfree(chan);
}

static int fsldma_of_probe(struct platform_device *op)
{
	struct fsldma_device *fdev;
	struct device_node *child;
	unsigned int i;
	int err;

	fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
	if (!fdev) {
		err = -ENOMEM;
		goto out_return;
	}

	fdev->dev = &op->dev;
	INIT_LIST_HEAD(&fdev->common.channels);

	/* ioremap the registers for use */
	fdev->regs = of_iomap(op->dev.of_node, 0);
	if (!fdev->regs) {
		dev_err(&op->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free;
	}

	/* map the channel IRQ if it exists, but don't hook up the handler yet */
	fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0);

	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
	dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
	fdev->common.device_tx_status = fsl_tx_status;
	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
	fdev->common.device_config = fsl_dma_device_config;
	fdev->common.device_terminate_all = fsl_dma_device_terminate_all;
	fdev->common.dev = &op->dev;

	fdev->common.src_addr_widths = FSL_DMA_BUSWIDTHS;
	fdev->common.dst_addr_widths = FSL_DMA_BUSWIDTHS;
	fdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	fdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	dma_set_mask(&(op->dev), DMA_BIT_MASK(36));

	platform_set_drvdata(op, fdev);

	/*
	 * We cannot use of_platform_bus_probe() because there is no
	 * of_platform_bus_remove(). Instead, we manually instantiate every DMA
	 * channel object.
	 */
	for_each_child_of_node(op->dev.of_node, child) {
		if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
				"fsl,eloplus-dma-channel");
		}

		if (of_device_is_compatible(child, "fsl,elo-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
				"fsl,elo-dma-channel");
		}
	}

	/*
	 * Hook up the IRQ handler(s)
	 *
	 * If we have a per-controller interrupt, we prefer that to the
	 * per-channel interrupts to reduce the number of shared interrupt
	 * handlers on the same IRQ line
	 */
	err = fsldma_request_irqs(fdev);
	if (err) {
		dev_err(fdev->dev, "unable to request IRQs\n");
		goto out_free_fdev;
	}

	dma_async_device_register(&fdev->common);
	return 0;

out_free_fdev:
	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		if (fdev->chan[i])
			fsl_dma_chan_remove(fdev->chan[i]);
	}
	irq_dispose_mapping(fdev->irq);
	iounmap(fdev->regs);
out_free:
	kfree(fdev);
out_return:
	return err;
}

static int fsldma_of_remove(struct platform_device *op)
{
	struct fsldma_device *fdev;
	unsigned int i;

	fdev = platform_get_drvdata(op);
	dma_async_device_unregister(&fdev->common);

	fsldma_free_irqs(fdev);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		if (fdev->chan[i])
			fsl_dma_chan_remove(fdev->chan[i]);
	}
	irq_dispose_mapping(fdev->irq);

	iounmap(fdev->regs);
	kfree(fdev);

	return 0;
}

#ifdef CONFIG_PM
static int fsldma_suspend_late(struct device *dev)
{
	struct fsldma_device *fdev = dev_get_drvdata(dev);
	struct fsldma_chan *chan;
	int i;

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		spin_lock_bh(&chan->desc_lock);
		if (unlikely(!chan->idle))
			goto out;
		chan->regs_save.mr = get_mr(chan);
		chan->pm_state = SUSPENDED;
		spin_unlock_bh(&chan->desc_lock);
	}
	return 0;

out:
	/*
	 * The channel that failed the idle check is still locked; release
	 * it before reverting every channel back to the RUNNING state.
	 * The earlier channels already dropped their locks above, so each
	 * one is retaken briefly here.
	 */
	spin_unlock_bh(&chan->desc_lock);

	for (; i >= 0; i--) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		spin_lock_bh(&chan->desc_lock);
		chan->pm_state = RUNNING;
		spin_unlock_bh(&chan->desc_lock);
	}
	return -EBUSY;
}

static int fsldma_resume_early(struct device *dev)
{
	struct fsldma_device *fdev = dev_get_drvdata(dev);
	struct fsldma_chan *chan;
	u32 mode;
	int i;

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		spin_lock_bh(&chan->desc_lock);
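		/*
		 * Restore the saved mode register with the CS, CC and CA
		 * control bits cleared so the channel comes back up idle.
		 */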
		mode = chan->regs_save.mr
			& ~FSL_DMA_MR_CS & ~FSL_DMA_MR_CC & ~FSL_DMA_MR_CA;
		set_mr(chan, mode);
		chan->pm_state = RUNNING;
		spin_unlock_bh(&chan->desc_lock);
	}

	return 0;
}

static const struct dev_pm_ops fsldma_pm_ops = {
	.suspend_late	= fsldma_suspend_late,
	.resume_early	= fsldma_resume_early,
};
#endif

static const struct of_device_id fsldma_of_ids[] = {
	{ .compatible = "fsl,elo3-dma", },
	{ .compatible = "fsl,eloplus-dma", },
	{ .compatible = "fsl,elo-dma", },
	{}
};
MODULE_DEVICE_TABLE(of, fsldma_of_ids);

static struct platform_driver fsldma_of_driver = {
	.driver = {
		.name = "fsl-elo-dma",
		.of_match_table = fsldma_of_ids,
#ifdef CONFIG_PM
		.pm = &fsldma_pm_ops,
#endif
	},
	.probe = fsldma_of_probe,
	.remove = fsldma_of_remove,
};

/*----------------------------------------------------------------------------*/
/* Module Init / Exit                                                         */
/*----------------------------------------------------------------------------*/

static __init int fsldma_init(void)
{
	pr_info("Freescale Elo series DMA driver\n");
	return platform_driver_register(&fsldma_of_driver);
}

static void __exit fsldma_exit(void)
{
	platform_driver_unregister(&fsldma_of_driver);
}

subsys_initcall(fsldma_init);
module_exit(fsldma_exit);

MODULE_DESCRIPTION("Freescale Elo series DMA driver");
MODULE_LICENSE("GPL");