Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) /* SPDX-License-Identifier: GPL-2.0-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /****************************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * Driver for Solarflare network controllers and boards
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  * Copyright 2005-2006 Fen Systems Ltd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * Copyright 2006-2013 Solarflare Communications Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) #ifndef EFX_IO_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) #define EFX_IO_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <linux/spinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) /**************************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16)  * NIC register I/O
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18)  **************************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20)  * Notes on locking strategy for the Falcon architecture:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22)  * Many CSRs are very wide and cannot be read or written atomically.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23)  * Writes from the host are buffered by the Bus Interface Unit (BIU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24)  * up to 128 bits.  Whenever the host writes part of such a register,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25)  * the BIU collects the written value and does not write to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26)  * underlying register until all 4 dwords have been written.  A
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27)  * similar buffering scheme applies to host access to the NIC's 64-bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28)  * SRAM.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30)  * Writes to different CSRs and 64-bit SRAM words must be serialised,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31)  * since interleaved access can result in lost writes.  We use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32)  * efx_nic::biu_lock for this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34)  * We also serialise reads from 128-bit CSRs and SRAM with the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35)  * spinlock.  This may not be necessary, but it doesn't really matter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36)  * as there are no such reads on the fast path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38)  * The DMA descriptor pointers (RX_DESC_UPD and TX_DESC_UPD) are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39)  * 128-bit but are special-cased in the BIU to avoid the need for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40)  * locking in the host:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42)  * - They are write-only.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43)  * - The semantics of writing to these registers are such that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44)  *   replacing the low 96 bits with zero does not affect functionality.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45)  * - If the host writes to the last dword address of such a register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46)  *   (i.e. the high 32 bits) the underlying register will always be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47)  *   written.  If the collector and the current write together do not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48)  *   provide values for all 128 bits of the register, the low 96 bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49)  *   will be written as zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50)  * - If the host writes to the address of any other part of such a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51)  *   register while the collector already holds values for some other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52)  *   register, the write is discarded and the collector maintains its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53)  *   current state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55)  * The EF10 architecture exposes very few registers to the host and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56)  * most of them are only 32 bits wide.  The only exceptions are the MC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57)  * doorbell register pair, which has its own latching, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58)  * TX_DESC_UPD, which works in a similar way to the Falcon
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59)  * architecture.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) #if BITS_PER_LONG == 64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) #define EFX_USE_QWORD_IO 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) /* Hardware issue requires that only 64-bit naturally aligned writes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67)  * are seen by hardware. It's not strictly necessary to restrict to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68)  * x86_64 arch, but done for safety since unusual write combining behaviour
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69)  * can break PIO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) #ifdef CONFIG_X86_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) /* PIO is a win only if write-combining is possible */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) #ifdef ARCH_HAS_IOREMAP_WC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) #define EFX_USE_PIO 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) static inline u32 efx_reg(struct efx_nic *efx, unsigned int reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 	return efx->reg_base + reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 
#ifdef EFX_USE_QWORD_IO
/* Raw 64-bit MMIO write of a little-endian value at byte offset @reg
 * from the register mapping.  __raw_writeq() performs no byte swapping
 * and implies no memory barriers; callers provide any ordering and
 * locking they need.
 */
static inline void _efx_writeq(struct efx_nic *efx, __le64 value,
				  unsigned int reg)
{
	__raw_writeq((__force u64)value, efx->membase + reg);
}

/* Raw 64-bit MMIO read; returns the register contents unswapped, as a
 * little-endian value.  No barriers; callers handle ordering.
 */
static inline __le64 _efx_readq(struct efx_nic *efx, unsigned int reg)
{
	return (__force __le64)__raw_readq(efx->membase + reg);
}
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) 
/* Raw 32-bit MMIO write of a little-endian value at byte offset @reg
 * from the register mapping.  __raw_writel() performs no byte swapping
 * and implies no memory barriers; callers provide any ordering and
 * locking they need.
 */
static inline void _efx_writed(struct efx_nic *efx, __le32 value,
				  unsigned int reg)
{
	__raw_writel((__force u32)value, efx->membase + reg);
}

/* Raw 32-bit MMIO read; returns the register contents unswapped, as a
 * little-endian value.  No barriers; callers handle ordering.
 */
static inline __le32 _efx_readd(struct efx_nic *efx, unsigned int reg)
{
	return (__force __le32)__raw_readl(efx->membase + reg);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 
/* Write a normal 128-bit CSR, locking as appropriate. */
static inline void efx_writeo(struct efx_nic *efx, const efx_oword_t *value,
			      unsigned int reg)
{
	/* flags may be unused if spin_lock_irqsave() compiles to nothing
	 * (e.g. !SMP builds), hence the attribute. */
	unsigned long flags __attribute__ ((unused));

	netif_vdbg(efx, hw, efx->net_dev,
		   "writing register %x with " EFX_OWORD_FMT "\n", reg,
		   EFX_OWORD_VAL(*value));

	/* Hold biu_lock across all partial writes so that writes from
	 * different CPUs cannot interleave in the BIU's 128-bit
	 * collector (see the locking notes at the top of this file). */
	spin_lock_irqsave(&efx->biu_lock, flags);
#ifdef EFX_USE_QWORD_IO
	_efx_writeq(efx, value->u64[0], reg + 0);
	_efx_writeq(efx, value->u64[1], reg + 8);
#else
	_efx_writed(efx, value->u32[0], reg + 0);
	_efx_writed(efx, value->u32[1], reg + 4);
	_efx_writed(efx, value->u32[2], reg + 8);
	_efx_writed(efx, value->u32[3], reg + 12);
#endif
	spin_unlock_irqrestore(&efx->biu_lock, flags);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) 
/* Write 64-bit SRAM through the supplied mapping, locking as appropriate. */
static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
				   const efx_qword_t *value, unsigned int index)
{
	/* SRAM is addressed as an array of 64-bit words */
	unsigned int addr = index * sizeof(*value);
	unsigned long flags __attribute__ ((unused));

	netif_vdbg(efx, hw, efx->net_dev,
		   "writing SRAM address %x with " EFX_QWORD_FMT "\n",
		   addr, EFX_QWORD_VAL(*value));

	/* 64-bit SRAM accesses share the BIU buffering described at the
	 * top of this file, so serialise against CSR writes too. */
	spin_lock_irqsave(&efx->biu_lock, flags);
#ifdef EFX_USE_QWORD_IO
	__raw_writeq((__force u64)value->u64[0], membase + addr);
#else
	__raw_writel((__force u32)value->u32[0], membase + addr);
	__raw_writel((__force u32)value->u32[1], membase + addr + 4);
#endif
	spin_unlock_irqrestore(&efx->biu_lock, flags);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 
/* Write a 32-bit CSR or the last dword of a special 128-bit CSR */
static inline void efx_writed(struct efx_nic *efx, const efx_dword_t *value,
			      unsigned int reg)
{
	netif_vdbg(efx, hw, efx->net_dev,
		   "writing register %x with "EFX_DWORD_FMT"\n",
		   reg, EFX_DWORD_VAL(*value));

	/* No lock required: a single dword write is not split, and per
	 * the notes at the top of this file, writing the last dword of
	 * a special 128-bit CSR always flushes the BIU collector. */
	_efx_writed(efx, value->u32[0], reg);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 
/* Read a 128-bit CSR, locking as appropriate. */
static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
			     unsigned int reg)
{
	unsigned long flags __attribute__ ((unused));

	/* Serialise the four dword reads with other BIU users.  Per the
	 * locking notes at the top of this file this may not be strictly
	 * necessary, but such reads are off the fast path anyway. */
	spin_lock_irqsave(&efx->biu_lock, flags);
	value->u32[0] = _efx_readd(efx, reg + 0);
	value->u32[1] = _efx_readd(efx, reg + 4);
	value->u32[2] = _efx_readd(efx, reg + 8);
	value->u32[3] = _efx_readd(efx, reg + 12);
	spin_unlock_irqrestore(&efx->biu_lock, flags);

	netif_vdbg(efx, hw, efx->net_dev,
		   "read from register %x, got " EFX_OWORD_FMT "\n", reg,
		   EFX_OWORD_VAL(*value));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) 
/* Read 64-bit SRAM through the supplied mapping, locking as appropriate. */
static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
				  efx_qword_t *value, unsigned int index)
{
	/* SRAM is addressed as an array of 64-bit words */
	unsigned int addr = index * sizeof(*value);
	unsigned long flags __attribute__ ((unused));

	/* Serialise with other BIU users; see the locking notes at the
	 * top of this file. */
	spin_lock_irqsave(&efx->biu_lock, flags);
#ifdef EFX_USE_QWORD_IO
	value->u64[0] = (__force __le64)__raw_readq(membase + addr);
#else
	value->u32[0] = (__force __le32)__raw_readl(membase + addr);
	value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
#endif
	spin_unlock_irqrestore(&efx->biu_lock, flags);

	netif_vdbg(efx, hw, efx->net_dev,
		   "read from SRAM address %x, got "EFX_QWORD_FMT"\n",
		   addr, EFX_QWORD_VAL(*value));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) 
/* Read a 32-bit CSR or SRAM */
static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value,
				unsigned int reg)
{
	/* No lock required: a single dword read does not involve the
	 * BIU's multi-dword buffering. */
	value->u32[0] = _efx_readd(efx, reg);
	netif_vdbg(efx, hw, efx->net_dev,
		   "read from register %x, got "EFX_DWORD_FMT"\n",
		   reg, EFX_DWORD_VAL(*value));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) /* Write a 128-bit CSR forming part of a table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) efx_writeo_table(struct efx_nic *efx, const efx_oword_t *value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 		 unsigned int reg, unsigned int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) 	efx_writeo(efx, value, reg + index * sizeof(efx_oword_t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) /* Read a 128-bit CSR forming part of a table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) 				     unsigned int reg, unsigned int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) 	efx_reado(efx, value, reg + index * sizeof(efx_oword_t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) /* default VI stride (step between per-VI registers) is 8K on EF10 and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226)  * 64K on EF100
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) #define EFX_DEFAULT_VI_STRIDE		0x2000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) #define EF100_DEFAULT_VI_STRIDE		0x10000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) /* Calculate offset to page-mapped register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) static inline unsigned int efx_paged_reg(struct efx_nic *efx, unsigned int page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) 					 unsigned int reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) 	return page * efx->vi_stride + reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) 
/* Write the whole of RX_DESC_UPD or TX_DESC_UPD */
static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
				    unsigned int reg, unsigned int page)
{
	reg = efx_paged_reg(efx, page, reg);

	netif_vdbg(efx, hw, efx->net_dev,
		   "writing register %x with " EFX_OWORD_FMT "\n", reg,
		   EFX_OWORD_VAL(*value));

	/* No locking: these registers are special-cased in the BIU (see
	 * the notes at the top of this file).  The low part is written
	 * first; the final write covers the register's last dword, which
	 * always flushes the value to the underlying register. */
#ifdef EFX_USE_QWORD_IO
	_efx_writeq(efx, value->u64[0], reg + 0);
	_efx_writeq(efx, value->u64[1], reg + 8);
#else
	_efx_writed(efx, value->u32[0], reg + 0);
	_efx_writed(efx, value->u32[1], reg + 4);
	_efx_writed(efx, value->u32[2], reg + 8);
	_efx_writed(efx, value->u32[3], reg + 12);
#endif
}
/* BUILD_BUG_ON_ZERO restricts use at compile time to offsets 0x830 and
 * 0xa10 (RX_DESC_UPD/TX_DESC_UPD), the registers with the BIU
 * special-casing the unlocked write above relies on. */
#define efx_writeo_page(efx, value, reg, page)				\
	_efx_writeo_page(efx, value,					\
			 reg +						\
			 BUILD_BUG_ON_ZERO((reg) != 0x830 && (reg) != 0xa10), \
			 page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) 
/* Write a page-mapped 32-bit CSR (EVQ_RPTR, EVQ_TMR (EF10), or the
 * high bits of RX_DESC_UPD or TX_DESC_UPD)
 */
static inline void
_efx_writed_page(struct efx_nic *efx, const efx_dword_t *value,
		 unsigned int reg, unsigned int page)
{
	/* Unlocked: see efx_writed() for why dword writes need no lock */
	efx_writed(efx, value, efx_paged_reg(efx, page, reg));
}
/* BUILD_BUG_ON_ZERO restricts use at compile time to the known set of
 * page-mapped 32-bit CSR offsets for which an unlocked dword write is
 * valid. */
#define efx_writed_page(efx, value, reg, page)				\
	_efx_writed_page(efx, value,					\
			 reg +						\
			 BUILD_BUG_ON_ZERO((reg) != 0x180 &&		\
					   (reg) != 0x200 &&		\
					   (reg) != 0x400 &&		\
					   (reg) != 0x420 &&		\
					   (reg) != 0x830 &&		\
					   (reg) != 0x83c &&		\
					   (reg) != 0xa18 &&		\
					   (reg) != 0xa1c),		\
			 page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) /* Write TIMER_COMMAND.  This is a page-mapped 32-bit CSR, but a bug
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287)  * in the BIU means that writes to TIMER_COMMAND[0] invalidate the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288)  * collector register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) static inline void _efx_writed_page_locked(struct efx_nic *efx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) 					   const efx_dword_t *value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) 					   unsigned int reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) 					   unsigned int page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) 	unsigned long flags __attribute__ ((unused));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) 	if (page == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) 		spin_lock_irqsave(&efx->biu_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) 		efx_writed(efx, value, efx_paged_reg(efx, page, reg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) 		spin_unlock_irqrestore(&efx->biu_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) 		efx_writed(efx, value, efx_paged_reg(efx, page, reg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) #define efx_writed_page_locked(efx, value, reg, page)			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) 	_efx_writed_page_locked(efx, value,				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) 				reg + BUILD_BUG_ON_ZERO((reg) != 0x420), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) 				page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) #endif /* EFX_IO_H */