Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author:
 *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
 *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
 */
#ifndef __DMA_FSLDMA_H
#define __DMA_FSLDMA_H

#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/dmaengine.h>

/* Define data structures needed by Freescale
 * MPC8540 and MPC8349 DMA controller.
 */
#define FSL_DMA_MR_CS		0x00000001
#define FSL_DMA_MR_CC		0x00000002
#define FSL_DMA_MR_CA		0x00000008
#define FSL_DMA_MR_EIE		0x00000040
#define FSL_DMA_MR_XFE		0x00000020
#define FSL_DMA_MR_EOLNIE	0x00000100
#define FSL_DMA_MR_EOLSIE	0x00000080
#define FSL_DMA_MR_EOSIE	0x00000200
#define FSL_DMA_MR_CDSM		0x00000010
#define FSL_DMA_MR_CTM		0x00000004
#define FSL_DMA_MR_EMP_EN	0x00200000
#define FSL_DMA_MR_EMS_EN	0x00040000
#define FSL_DMA_MR_DAHE		0x00002000
#define FSL_DMA_MR_SAHE		0x00001000

#define FSL_DMA_MR_SAHTS_MASK	0x0000C000
#define FSL_DMA_MR_DAHTS_MASK	0x00030000
#define FSL_DMA_MR_BWC_MASK	0x0f000000

/*
 * Bandwidth/pause control determines how many bytes a given
 * channel is allowed to transfer before the DMA engine pauses
 * the current channel and switches to the next channel
 */
#define FSL_DMA_MR_BWC         0x0A000000
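
/*
 * Illustrative sketch, not part of the original header: a driver would
 * typically program the bandwidth control field by clearing the mask and
 * then OR-ing in the desired value, along the lines of
 *
 *	mode = (mode & ~FSL_DMA_MR_BWC_MASK) | FSL_DMA_MR_BWC;
 *
 * where 'mode' is a hypothetical cached copy of the channel's MR register.
 */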

/* Special MR definition for MPC8349 */
#define FSL_DMA_MR_EOTIE	0x00000080
#define FSL_DMA_MR_PRC_RM	0x00000800

#define FSL_DMA_SR_CH		0x00000020
#define FSL_DMA_SR_PE		0x00000010
#define FSL_DMA_SR_CB		0x00000004
#define FSL_DMA_SR_TE		0x00000080
#define FSL_DMA_SR_EOSI		0x00000002
#define FSL_DMA_SR_EOLSI	0x00000001
#define FSL_DMA_SR_EOCDI	0x00000001
#define FSL_DMA_SR_EOLNI	0x00000008

#define FSL_DMA_SATR_SBPATMU			0x20000000
#define FSL_DMA_SATR_STRANSINT_RIO		0x00c00000
#define FSL_DMA_SATR_SREADTYPE_SNOOP_READ	0x00050000
#define FSL_DMA_SATR_SREADTYPE_BP_IORH		0x00020000
#define FSL_DMA_SATR_SREADTYPE_BP_NREAD		0x00040000
#define FSL_DMA_SATR_SREADTYPE_BP_MREAD		0x00070000

#define FSL_DMA_DATR_DBPATMU			0x20000000
#define FSL_DMA_DATR_DTRANSINT_RIO		0x00c00000
#define FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE	0x00050000
#define FSL_DMA_DATR_DWRITETYPE_BP_FLUSH	0x00010000

#define FSL_DMA_EOL		((u64)0x1)
#define FSL_DMA_SNEN		((u64)0x10)
#define FSL_DMA_EOSIE		0x8
#define FSL_DMA_NLDA_MASK	(~(u64)0x1f)

#define FSL_DMA_BCR_MAX_CNT	0x03ffffffu

#define FSL_DMA_DGSR_TE		0x80
#define FSL_DMA_DGSR_CH		0x20
#define FSL_DMA_DGSR_PE		0x10
#define FSL_DMA_DGSR_EOLNI	0x08
#define FSL_DMA_DGSR_CB		0x04
#define FSL_DMA_DGSR_EOSI	0x02
#define FSL_DMA_DGSR_EOLSI	0x01

#define FSL_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
				BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
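
/*
 * Illustrative sketch (an assumption, not taken from this header): the
 * driver's probe path would be expected to advertise the FSL_DMA_BUSWIDTHS
 * mask above through the dmaengine capability fields, e.g.
 *
 *	fdev->common.src_addr_widths = FSL_DMA_BUSWIDTHS;
 *	fdev->common.dst_addr_widths = FSL_DMA_BUSWIDTHS;
 *
 * where 'fdev' is a hypothetical struct fsldma_device pointer.
 */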
typedef u64 __bitwise v64;
typedef u32 __bitwise v32;
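/*
 * The v64/v32 typedefs above carry the sparse __bitwise annotation, so
 * hardware descriptor fields cannot be mixed with plain CPU-endian integers
 * without going through the CPU_TO_DMA()/DMA_TO_CPU() helpers defined at
 * the end of this header.
 */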

struct fsl_dma_ld_hw {
	v64 src_addr;
	v64 dst_addr;
	v64 next_ln_addr;
	v32 count;
	v32 reserve;
} __attribute__((aligned(32)));
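
/*
 * Descriptive note, inferred from the field names above: fsl_dma_ld_hw is
 * the in-memory link descriptor consumed by the DMA controller (source and
 * destination addresses, next-descriptor pointer, byte count). The 32-byte
 * alignment is consistent with FSL_DMA_NLDA_MASK (~0x1f), which strips the
 * low five bits of the next-descriptor address.
 */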

struct fsl_desc_sw {
	struct fsl_dma_ld_hw hw;
	struct list_head node;
	struct list_head tx_list;
	struct dma_async_tx_descriptor async_tx;
} __attribute__((aligned(32)));
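
/*
 * Descriptive note (hedged): fsl_desc_sw above is the software wrapper
 * around one hardware descriptor; 'node' links it onto the channel's
 * ld_pending/ld_running/ld_completed lists, 'tx_list' groups the
 * descriptors that make up one transaction, and 'async_tx' is the
 * dmaengine-visible cookie/callback handle.
 */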

struct fsldma_chan_regs {
	u32 mr;		/* 0x00 - Mode Register */
	u32 sr;		/* 0x04 - Status Register */
	u64 cdar;	/* 0x08 - Current descriptor address register */
	u64 sar;	/* 0x10 - Source Address Register */
	u64 dar;	/* 0x18 - Destination Address Register */
	u32 bcr;	/* 0x20 - Byte Count Register */
	u64 ndar;	/* 0x24 - Next Descriptor Address Register */
};

struct fsldma_chan;
#define FSL_DMA_MAX_CHANS_PER_DEVICE 8

struct fsldma_device {
	void __iomem *regs;	/* DGSR register base */
	struct device *dev;
	struct dma_device common;
	struct fsldma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE];
	u32 feature;		/* The same as DMA channels */
	int irq;		/* Channel IRQ */
};

/* Define macros for fsldma_chan->feature property */
#define FSL_DMA_LITTLE_ENDIAN	0x00000000
#define FSL_DMA_BIG_ENDIAN	0x00000001

#define FSL_DMA_IP_MASK		0x00000ff0
#define FSL_DMA_IP_85XX		0x00000010
#define FSL_DMA_IP_83XX		0x00000020

#define FSL_DMA_CHAN_PAUSE_EXT	0x00001000
#define FSL_DMA_CHAN_START_EXT	0x00002000

#ifdef CONFIG_PM
struct fsldma_chan_regs_save {
	u32 mr;
};

enum fsldma_pm_state {
	RUNNING = 0,
	SUSPENDED,
};
#endif

struct fsldma_chan {
	char name[8];			/* Channel name */
	struct fsldma_chan_regs __iomem *regs;
	spinlock_t desc_lock;		/* Descriptor operation lock */
	/*
	 * Descriptors which are queued to run, but have not yet been
	 * submitted to the hardware for execution
	 */
	struct list_head ld_pending;
	/*
	 * Descriptors which are currently being executed by the hardware
	 */
	struct list_head ld_running;
	/*
	 * Descriptors which have finished execution by the hardware. These
	 * descriptors have already had their cleanup actions run. They are
	 * waiting for the ACK bit to be set by the async_tx API.
	 */
	struct list_head ld_completed;	/* Link descriptors queue */
	struct dma_chan common;		/* DMA common channel */
	struct dma_pool *desc_pool;	/* Descriptors pool */
	struct device *dev;		/* Channel device */
	int irq;			/* Channel IRQ */
	int id;				/* Raw id of this channel */
	struct tasklet_struct tasklet;
	u32 feature;
	bool idle;			/* DMA controller is idle */
#ifdef CONFIG_PM
	struct fsldma_chan_regs_save regs_save;
	enum fsldma_pm_state pm_state;
#endif

	void (*toggle_ext_pause)(struct fsldma_chan *fsl_chan, int enable);
	void (*toggle_ext_start)(struct fsldma_chan *fsl_chan, int enable);
	void (*set_src_loop_size)(struct fsldma_chan *fsl_chan, int size);
	void (*set_dst_loop_size)(struct fsldma_chan *fsl_chan, int size);
	void (*set_request_count)(struct fsldma_chan *fsl_chan, int size);
};

#define to_fsl_chan(chan) container_of(chan, struct fsldma_chan, common)
#define to_fsl_desc(lh) container_of(lh, struct fsl_desc_sw, node)
#define tx_to_fsl_desc(tx) container_of(tx, struct fsl_desc_sw, async_tx)
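
/*
 * Illustrative sketch (hypothetical names) of the container_of helpers
 * above, as a driver callback might use them:
 *
 *	struct fsldma_chan *chan = to_fsl_chan(dchan);
 *	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
 *
 * where 'dchan' is the struct dma_chan * and 'tx' the
 * struct dma_async_tx_descriptor * handed in by the dmaengine core.
 */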

#ifdef	CONFIG_PPC
#define fsl_ioread32(p)		in_le32(p)
#define fsl_ioread32be(p)	in_be32(p)
#define fsl_iowrite32(v, p)	out_le32(p, v)
#define fsl_iowrite32be(v, p)	out_be32(p, v)

#ifdef __powerpc64__
#define fsl_ioread64(p)		in_le64(p)
#define fsl_ioread64be(p)	in_be64(p)
#define fsl_iowrite64(v, p)	out_le64(p, v)
#define fsl_iowrite64be(v, p)	out_be64(p, v)
#else
static u64 fsl_ioread64(const u64 __iomem *addr)
{
	u32 val_lo = in_le32((u32 __iomem *)addr);
	u32 val_hi = in_le32((u32 __iomem *)addr + 1);

	return ((u64)val_hi << 32) + val_lo;
}

static void fsl_iowrite64(u64 val, u64 __iomem *addr)
{
	out_le32((u32 __iomem *)addr + 1, val >> 32);
	out_le32((u32 __iomem *)addr, (u32)val);
}

static u64 fsl_ioread64be(const u64 __iomem *addr)
{
	u32 val_hi = in_be32((u32 __iomem *)addr);
	u32 val_lo = in_be32((u32 __iomem *)addr + 1);

	return ((u64)val_hi << 32) + val_lo;
}

static void fsl_iowrite64be(u64 val, u64 __iomem *addr)
{
	out_be32((u32 __iomem *)addr, val >> 32);
	out_be32((u32 __iomem *)addr + 1, (u32)val);
}
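
/*
 * Note: on 32-bit PowerPC the 64-bit accessors above are emulated with two
 * 32-bit MMIO operations, so a 64-bit register read or write is not a
 * single atomic bus access.
 */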
#endif
#endif

#if defined(CONFIG_ARM64) || defined(CONFIG_ARM)
#define fsl_ioread32(p)		ioread32(p)
#define fsl_ioread32be(p)	ioread32be(p)
#define fsl_iowrite32(v, p)	iowrite32(v, p)
#define fsl_iowrite32be(v, p)	iowrite32be(v, p)
#define fsl_ioread64(p)		ioread64(p)
#define fsl_ioread64be(p)	ioread64be(p)
#define fsl_iowrite64(v, p)	iowrite64(v, p)
#define fsl_iowrite64be(v, p)	iowrite64be(v, p)
#endif

#define FSL_DMA_IN(fsl_dma, addr, width)			\
		(((fsl_dma)->feature & FSL_DMA_BIG_ENDIAN) ?	\
			fsl_ioread##width##be(addr) : fsl_ioread##width(addr))

#define FSL_DMA_OUT(fsl_dma, addr, val, width)			\
		(((fsl_dma)->feature & FSL_DMA_BIG_ENDIAN) ?	\
			fsl_iowrite##width##be(val, addr) : fsl_iowrite	\
		##width(val, addr))
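
/*
 * Illustrative sketch (hypothetical names): FSL_DMA_IN()/FSL_DMA_OUT()
 * select the big- or little-endian accessor based on the feature flag of
 * the channel or device passed in, e.g.
 *
 *	u32 sr = FSL_DMA_IN(chan, &chan->regs->sr, 32);
 *	FSL_DMA_OUT(chan, &chan->regs->mr, mode, 32);
 */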

#define DMA_TO_CPU(fsl_chan, d, width)					\
		(((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ?		\
			be##width##_to_cpu((__force __be##width)(v##width)d) : \
			le##width##_to_cpu((__force __le##width)(v##width)d))
#define CPU_TO_DMA(fsl_chan, c, width)					\
		(((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ?		\
			(__force v##width)cpu_to_be##width(c) :		\
			(__force v##width)cpu_to_le##width(c))
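
/*
 * Illustrative sketch (hypothetical names): link-descriptor fields are
 * written and read through these converters so the in-memory layout always
 * matches the controller's endianness, e.g.
 *
 *	desc->hw.count = CPU_TO_DMA(chan, len, 32);
 *	u64 next = DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64);
 */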

#endif	/* __DMA_FSLDMA_H */