Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 */

#ifndef MV_XOR_H
#define MV_XOR_H

#include <linux/types.h>
#include <linux/io.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>

#define MV_XOR_POOL_SIZE		(MV_XOR_SLOT_SIZE * 3072)
#define MV_XOR_SLOT_SIZE		64
#define MV_XOR_THRESHOLD		1
#define MV_XOR_MAX_CHANNELS             2

#define MV_XOR_MIN_BYTE_COUNT		SZ_128
#define MV_XOR_MAX_BYTE_COUNT		(SZ_16M - 1)

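/*
 * Sizing note on the constants above: with MV_XOR_SLOT_SIZE = 64, the
 * descriptor pool is 64 * 3072 = 196608 bytes (192 KiB), i.e. room for
 * 3072 hardware descriptors, and a single descriptor may cover anywhere
 * from 128 bytes (MV_XOR_MIN_BYTE_COUNT) up to just under 16 MiB
 * (MV_XOR_MAX_BYTE_COUNT).
 */
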
/* Values for the XOR_CONFIG register */
#define XOR_OPERATION_MODE_XOR		0
#define XOR_OPERATION_MODE_MEMCPY	2
#define XOR_OPERATION_MODE_IN_DESC      7
#define XOR_DESCRIPTOR_SWAP		BIT(14)
#define XOR_DESC_SUCCESS		0x40000000

#define XOR_DESC_OPERATION_XOR          (0 << 24)
#define XOR_DESC_OPERATION_CRC32C       (1 << 24)
#define XOR_DESC_OPERATION_MEMCPY       (2 << 24)

#define XOR_DESC_DMA_OWNED		BIT(31)
#define XOR_DESC_EOD_INT_EN		BIT(31)

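/*
 * A minimal sketch (not part of the original header) of how the
 * XOR_CONFIG values above can be combined into a configuration word:
 * the mode values are small integers in the low bits, and
 * XOR_DESCRIPTOR_SWAP enables the 8-byte descriptor swap used on
 * big-endian systems (see the comment above struct mv_xor_desc below).
 * mv_xor_example_config() is a hypothetical helper name.
 */
static inline u32 mv_xor_example_config(bool big_endian)
{
	u32 config = XOR_OPERATION_MODE_IN_DESC;	/* operation chosen per descriptor */

	if (big_endian)
		config |= XOR_DESCRIPTOR_SWAP;		/* swap descriptor data in 8-byte blocks */

	return config;
}
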
#define XOR_CURR_DESC(chan)	(chan->mmr_high_base + 0x10 + (chan->idx * 4))
#define XOR_NEXT_DESC(chan)	(chan->mmr_high_base + 0x00 + (chan->idx * 4))
#define XOR_BYTE_COUNT(chan)	(chan->mmr_high_base + 0x20 + (chan->idx * 4))
#define XOR_DEST_POINTER(chan)	(chan->mmr_high_base + 0xB0 + (chan->idx * 4))
#define XOR_BLOCK_SIZE(chan)	(chan->mmr_high_base + 0xC0 + (chan->idx * 4))
#define XOR_INIT_VALUE_LOW(chan)	(chan->mmr_high_base + 0xE0)
#define XOR_INIT_VALUE_HIGH(chan)	(chan->mmr_high_base + 0xE4)

#define XOR_CONFIG(chan)	(chan->mmr_base + 0x10 + (chan->idx * 4))
#define XOR_ACTIVATION(chan)	(chan->mmr_base + 0x20 + (chan->idx * 4))
#define XOR_INTR_CAUSE(chan)	(chan->mmr_base + 0x30)
#define XOR_INTR_MASK(chan)	(chan->mmr_base + 0x40)
#define XOR_ERROR_CAUSE(chan)	(chan->mmr_base + 0x50)
#define XOR_ERROR_ADDR(chan)	(chan->mmr_base + 0x60)

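/*
 * Illustrative use of the per-channel register macros above: each one
 * resolves to a register address derived from the channel's register
 * base and index. One plausible start sequence, as a sketch only (the
 * helper name is hypothetical and struct mv_xor_chan is defined further
 * down), would be:
 *
 *	static void example_chan_start(struct mv_xor_chan *chan, u32 first_desc)
 *	{
 *		writel_relaxed(first_desc, XOR_NEXT_DESC(chan));
 *		writel_relaxed(1, XOR_ACTIVATION(chan));
 *	}
 *
 * i.e. point the engine at the first hardware descriptor of a chain,
 * then kick it via the activation register.
 */
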
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) #define XOR_INT_END_OF_DESC	BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) #define XOR_INT_END_OF_CHAIN	BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) #define XOR_INT_STOPPED		BIT(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) #define XOR_INT_PAUSED		BIT(3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) #define XOR_INT_ERR_DECODE	BIT(4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) #define XOR_INT_ERR_RDPROT	BIT(5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) #define XOR_INT_ERR_WRPROT	BIT(6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) #define XOR_INT_ERR_OWN		BIT(7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) #define XOR_INT_ERR_PAR		BIT(8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) #define XOR_INT_ERR_MBUS	BIT(9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) #define XOR_INTR_ERRORS		(XOR_INT_ERR_DECODE | XOR_INT_ERR_RDPROT | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) 				 XOR_INT_ERR_WRPROT | XOR_INT_ERR_OWN    | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) 				 XOR_INT_ERR_PAR    | XOR_INT_ERR_MBUS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) #define XOR_INTR_MASK_VALUE	(XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) 				 XOR_INT_STOPPED     | XOR_INTR_ERRORS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) 
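/*
 * Sketch of how these bits might be consumed in an interrupt handler
 * (illustrative only, not code from this driver; the real handler also
 * has to account for how the shared cause register lays out the two
 * channels):
 *
 *	u32 cause = readl_relaxed(XOR_INTR_CAUSE(chan));
 *
 *	if (cause & XOR_INTR_ERRORS)
 *		(report the error; XOR_ERROR_CAUSE/XOR_ERROR_ADDR say why and where)
 *	if (cause & (XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN))
 *		tasklet_schedule(&chan->irq_tasklet);
 */
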
#define WINDOW_BASE(w)		(0x50 + ((w) << 2))
#define WINDOW_SIZE(w)		(0x70 + ((w) << 2))
#define WINDOW_REMAP_HIGH(w)	(0x90 + ((w) << 2))
#define WINDOW_BAR_ENABLE(chan)	(0x40 + ((chan) << 2))
#define WINDOW_OVERRIDE_CTRL(chan)	(0xA0 + ((chan) << 2))

#define WINDOW_COUNT		8

struct mv_xor_device {
	void __iomem	     *xor_base;
	void __iomem	     *xor_high_base;
	struct clk	     *clk;
	struct mv_xor_chan   *channels[MV_XOR_MAX_CHANNELS];
	int		     xor_type;

	u32                  win_start[WINDOW_COUNT];
	u32                  win_end[WINDOW_COUNT];
};

/**
 * struct mv_xor_chan - internal representation of a XOR channel
 * @pending: allows batching of hardware operations
 * @lock: serializes enqueue/dequeue operations to the descriptors pool
 * @mmr_base: memory mapped register base
 * @mmr_high_base: memory mapped high register base
 * @idx: the index of the xor channel
 * @chain: device chain view of the descriptors
 * @free_slots: free slots usable by the channel
 * @allocated_slots: slots allocated by the driver
 * @completed_slots: slots completed by HW but still need to be acked
 * @xordev: pointer to the parent mv_xor_device
 * @dmadev: dmaengine device object
 * @dmachan: dmaengine channel object
 * @slots_allocated: records the actual size of the descriptor slot pool
 * @irq_tasklet: bottom half where mv_xor_slot_cleanup runs
 * @op_in_desc: new mode of the driver, where each op is written to its descriptor
 */
struct mv_xor_chan {
	int			pending;
	spinlock_t		lock; /* protects the descriptor slot pool */
	void __iomem		*mmr_base;
	void __iomem		*mmr_high_base;
	unsigned int		idx;
	int                     irq;
	struct list_head	chain;
	struct list_head	free_slots;
	struct list_head	allocated_slots;
	struct list_head	completed_slots;
	dma_addr_t		dma_desc_pool;
	void			*dma_desc_pool_virt;
	size_t                  pool_size;
	struct dma_device	dmadev;
	struct dma_chan		dmachan;
	int			slots_allocated;
	struct tasklet_struct	irq_tasklet;
	int                     op_in_desc;
	char			dummy_src[MV_XOR_MIN_BYTE_COUNT];
	char			dummy_dst[MV_XOR_MIN_BYTE_COUNT];
	dma_addr_t		dummy_src_addr, dummy_dst_addr;
	u32                     saved_config_reg, saved_int_mask_reg;

	struct mv_xor_device	*xordev;
};

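/*
 * The dmaengine objects are embedded in struct mv_xor_chan, so the
 * usual way back from a struct dma_chan handed in by the framework is
 * container_of(). A sketch with a hypothetical helper name, not part
 * of the original header:
 */
static inline struct mv_xor_chan *example_to_mv_xor_chan(struct dma_chan *c)
{
	return container_of(c, struct mv_xor_chan, dmachan);
}
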
/**
 * struct mv_xor_desc_slot - software descriptor
 * @node: node on the mv_xor_chan lists
 * @sg_tx_list: list of slots that make up a multi-descriptor transaction
 * @type: DMA transaction type carried by this slot
 * @hw_desc: virtual address of the hardware descriptor chain
 * @idx: pool index
 * @async_tx: support for the async_tx api
 */
struct mv_xor_desc_slot {
	struct list_head	node;
	struct list_head	sg_tx_list;
	enum dma_transaction_type	type;
	void			*hw_desc;
	u16			idx;
	struct dma_async_tx_descriptor	async_tx;
};

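/*
 * The async_tx descriptor handed back to dmaengine is embedded in the
 * slot, so the reverse lookup is again container_of(). A sketch with a
 * hypothetical helper name, not part of the original header:
 */
static inline struct mv_xor_desc_slot *
example_txd_to_mv_xor_slot(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct mv_xor_desc_slot, async_tx);
}
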
/*
 * This structure describes the 64-byte XOR hardware descriptor. The
 * mv_phy_src_idx() macro must be used when indexing the values of the
 * phy_src_addr[] array. This is because the 'descriptor swap' feature,
 * used on big-endian systems, swaps descriptor data within blocks of
 * 8 bytes. So two consecutive values of the phy_src_addr[] array are
 * actually swapped on big-endian, which explains the different
 * mv_phy_src_idx() implementation.
 */
#if defined(__LITTLE_ENDIAN)
struct mv_xor_desc {
	u32 status;		/* descriptor execution status */
	u32 crc32_result;	/* result of CRC-32 calculation */
	u32 desc_command;	/* type of operation to be carried out */
	u32 phy_next_desc;	/* next descriptor address pointer */
	u32 byte_count;		/* size of src/dst blocks in bytes */
	u32 phy_dest_addr;	/* destination block address */
	u32 phy_src_addr[8];	/* source block addresses */
	u32 reserved0;
	u32 reserved1;
};
#define mv_phy_src_idx(src_idx) (src_idx)
#else
struct mv_xor_desc {
	u32 crc32_result;	/* result of CRC-32 calculation */
	u32 status;		/* descriptor execution status */
	u32 phy_next_desc;	/* next descriptor address pointer */
	u32 desc_command;	/* type of operation to be carried out */
	u32 phy_dest_addr;	/* destination block address */
	u32 byte_count;		/* size of src/dst blocks in bytes */
	u32 phy_src_addr[8];	/* source block addresses */
	u32 reserved1;
	u32 reserved0;
};
#define mv_phy_src_idx(src_idx) (src_idx ^ 1)
#endif

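/*
 * Example of the indexing rule the comment above describes (a sketch,
 * not part of the original header; the helper name is hypothetical):
 * on big-endian kernels mv_phy_src_idx() XORs the index with 1, so two
 * consecutive sources land in the swapped positions the hardware
 * expects, while on little-endian the index is used as-is.
 */
static inline void mv_xor_example_set_src(struct mv_xor_desc *hw_desc,
					  int index, u32 addr)
{
	hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
}
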
#define to_mv_sw_desc(addr_hw_desc)		\
	container_of(addr_hw_desc, struct mv_xor_desc_slot, hw_desc)

#define mv_hw_desc_slot_idx(hw_desc, idx)	\
	((void *)(((unsigned long)hw_desc) + ((idx) << 5)))

#endif