// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2016-2017, National Instruments Corp.
 *
 * Author: Moritz Fischer <mdf@kernel.org>
 */

#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_address.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/skbuff.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/nvmem-consumer.h>
#include <linux/ethtool.h>
#include <linux/iopoll.h>

#define TX_BD_NUM		64
#define RX_BD_NUM		128

/* Axi DMA Register definitions */
#define XAXIDMA_TX_CR_OFFSET	0x00 /* Channel control */
#define XAXIDMA_TX_SR_OFFSET	0x04 /* Status */
#define XAXIDMA_TX_CDESC_OFFSET	0x08 /* Current descriptor pointer */
#define XAXIDMA_TX_TDESC_OFFSET	0x10 /* Tail descriptor pointer */

#define XAXIDMA_RX_CR_OFFSET	0x30 /* Channel control */
#define XAXIDMA_RX_SR_OFFSET	0x34 /* Status */
#define XAXIDMA_RX_CDESC_OFFSET	0x38 /* Current descriptor pointer */
#define XAXIDMA_RX_TDESC_OFFSET	0x40 /* Tail descriptor pointer */

#define XAXIDMA_CR_RUNSTOP_MASK	0x1 /* Start/stop DMA channel */
#define XAXIDMA_CR_RESET_MASK	0x4 /* Reset DMA engine */

#define XAXIDMA_BD_CTRL_LENGTH_MASK	0x007FFFFF /* Requested len */
#define XAXIDMA_BD_CTRL_TXSOF_MASK	0x08000000 /* First tx packet */
#define XAXIDMA_BD_CTRL_TXEOF_MASK	0x04000000 /* Last tx packet */
#define XAXIDMA_BD_CTRL_ALL_MASK	0x0C000000 /* All control bits */

#define XAXIDMA_DELAY_MASK		0xFF000000 /* Delay timeout counter */
#define XAXIDMA_COALESCE_MASK		0x00FF0000 /* Coalesce counter */

#define XAXIDMA_DELAY_SHIFT		24
#define XAXIDMA_COALESCE_SHIFT		16

#define XAXIDMA_IRQ_IOC_MASK		0x00001000 /* Completion intr */
#define XAXIDMA_IRQ_DELAY_MASK		0x00002000 /* Delay interrupt */
#define XAXIDMA_IRQ_ERROR_MASK		0x00004000 /* Error interrupt */
#define XAXIDMA_IRQ_ALL_MASK		0x00007000 /* All interrupts */

/* Default TX/RX Threshold and waitbound values for SGDMA mode */
#define XAXIDMA_DFT_TX_THRESHOLD	24
#define XAXIDMA_DFT_TX_WAITBOUND	254
#define XAXIDMA_DFT_RX_THRESHOLD	24
#define XAXIDMA_DFT_RX_WAITBOUND	254

#define XAXIDMA_BD_STS_ACTUAL_LEN_MASK	0x007FFFFF /* Actual len */
#define XAXIDMA_BD_STS_COMPLETE_MASK	0x80000000 /* Completed */
#define XAXIDMA_BD_STS_DEC_ERR_MASK	0x40000000 /* Decode error */
#define XAXIDMA_BD_STS_SLV_ERR_MASK	0x20000000 /* Slave error */
#define XAXIDMA_BD_STS_INT_ERR_MASK	0x10000000 /* Internal err */
#define XAXIDMA_BD_STS_ALL_ERR_MASK	0x70000000 /* All errors */
#define XAXIDMA_BD_STS_RXSOF_MASK	0x08000000 /* First rx pkt */
#define XAXIDMA_BD_STS_RXEOF_MASK	0x04000000 /* Last rx pkt */
#define XAXIDMA_BD_STS_ALL_MASK		0xFC000000 /* All status bits */

#define NIXGE_REG_CTRL_OFFSET	0x4000
#define NIXGE_REG_INFO		0x00
#define NIXGE_REG_MAC_CTL	0x04
#define NIXGE_REG_PHY_CTL	0x08
#define NIXGE_REG_LED_CTL	0x0c
#define NIXGE_REG_MDIO_DATA	0x10
#define NIXGE_REG_MDIO_ADDR	0x14
#define NIXGE_REG_MDIO_OP	0x18
#define NIXGE_REG_MDIO_CTRL	0x1c

#define NIXGE_ID_LED_CTL_EN	BIT(0)
#define NIXGE_ID_LED_CTL_VAL	BIT(1)

#define NIXGE_MDIO_CLAUSE45	BIT(12)
#define NIXGE_MDIO_CLAUSE22	0
#define NIXGE_MDIO_OP(n)	(((n) & 0x3) << 10)
#define NIXGE_MDIO_OP_ADDRESS	0
#define NIXGE_MDIO_C45_WRITE	BIT(0)
#define NIXGE_MDIO_C45_READ	(BIT(1) | BIT(0))
#define NIXGE_MDIO_C22_WRITE	BIT(0)
#define NIXGE_MDIO_C22_READ	BIT(1)
#define NIXGE_MDIO_ADDR(n)	(((n) & 0x1f) << 5)
#define NIXGE_MDIO_MMD(n)	(((n) & 0x1f) << 0)

#define NIXGE_REG_MAC_LSB	0x1000
#define NIXGE_REG_MAC_MSB	0x1004

/* Packet size info */
#define NIXGE_HDR_SIZE		14 /* Size of Ethernet header */
#define NIXGE_TRL_SIZE		4 /* Size of Ethernet trailer (FCS) */
#define NIXGE_MTU		1500 /* Max MTU of an Ethernet frame */
#define NIXGE_JUMBO_MTU		9000 /* Max MTU of a jumbo Eth. frame */

#define NIXGE_MAX_FRAME_SIZE	(NIXGE_MTU + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE)
#define NIXGE_MAX_JUMBO_FRAME_SIZE \
	(NIXGE_JUMBO_MTU + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE)

enum nixge_version {
	NIXGE_V2,
	NIXGE_V3,
	NIXGE_VERSION_COUNT
};

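/* Hardware buffer descriptor. Judging by the XAXIDMA_* register names,
 * this mirrors the AXI DMA scatter-gather descriptor layout: the next
 * and phys pointers are split into 32-bit halves so the same structure
 * works on 32- and 64-bit systems, and sw_id_offset is repurposed to
 * stash the skb pointer that owns the buffer.
 */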
struct nixge_hw_dma_bd {
	u32 next_lo;
	u32 next_hi;
	u32 phys_lo;
	u32 phys_hi;
	u32 reserved3;
	u32 reserved4;
	u32 cntrl;
	u32 status;
	u32 app0;
	u32 app1;
	u32 app2;
	u32 app3;
	u32 app4;
	u32 sw_id_offset_lo;
	u32 sw_id_offset_hi;
	u32 reserved6;
};

#ifdef CONFIG_PHYS_ADDR_T_64BIT
#define nixge_hw_dma_bd_set_addr(bd, field, addr) \
	do { \
		(bd)->field##_lo = lower_32_bits((addr)); \
		(bd)->field##_hi = upper_32_bits((addr)); \
	} while (0)
#else
#define nixge_hw_dma_bd_set_addr(bd, field, addr) \
	((bd)->field##_lo = lower_32_bits((addr)))
#endif

#define nixge_hw_dma_bd_set_phys(bd, addr) \
	nixge_hw_dma_bd_set_addr((bd), phys, (addr))

#define nixge_hw_dma_bd_set_next(bd, addr) \
	nixge_hw_dma_bd_set_addr((bd), next, (addr))

#define nixge_hw_dma_bd_set_offset(bd, addr) \
	nixge_hw_dma_bd_set_addr((bd), sw_id_offset, (addr))

#ifdef CONFIG_PHYS_ADDR_T_64BIT
#define nixge_hw_dma_bd_get_addr(bd, field) \
	(dma_addr_t)((((u64)(bd)->field##_hi) << 32) | ((bd)->field##_lo))
#else
#define nixge_hw_dma_bd_get_addr(bd, field) \
	(dma_addr_t)((bd)->field##_lo)
#endif

struct nixge_tx_skb {
	struct sk_buff *skb;
	dma_addr_t mapping;
	size_t size;
	bool mapped_as_page;
};

struct nixge_priv {
	struct net_device *ndev;
	struct napi_struct napi;
	struct device *dev;

	/* Connection to PHY device */
	struct device_node *phy_node;
	phy_interface_t phy_mode;

	int link;
	unsigned int speed;
	unsigned int duplex;

	/* MDIO bus data */
	struct mii_bus *mii_bus;	/* MII bus reference */

	/* IO registers, dma functions and IRQs */
	void __iomem *ctrl_regs;
	void __iomem *dma_regs;

	struct tasklet_struct dma_err_tasklet;

	int tx_irq;
	int rx_irq;

	/* Buffer descriptors */
	struct nixge_hw_dma_bd *tx_bd_v;
	struct nixge_tx_skb *tx_skb;
	dma_addr_t tx_bd_p;

	struct nixge_hw_dma_bd *rx_bd_v;
	dma_addr_t rx_bd_p;
	u32 tx_bd_ci;
	u32 tx_bd_tail;
	u32 rx_bd_ci;

	u32 coalesce_count_rx;
	u32 coalesce_count_tx;
};

static void nixge_dma_write_reg(struct nixge_priv *priv, off_t offset, u32 val)
{
	writel(val, priv->dma_regs + offset);
}

static void nixge_dma_write_desc_reg(struct nixge_priv *priv, off_t offset,
				     dma_addr_t addr)
{
	writel(lower_32_bits(addr), priv->dma_regs + offset);
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	writel(upper_32_bits(addr), priv->dma_regs + offset + 4);
#endif
}

static u32 nixge_dma_read_reg(const struct nixge_priv *priv, off_t offset)
{
	return readl(priv->dma_regs + offset);
}

static void nixge_ctrl_write_reg(struct nixge_priv *priv, off_t offset, u32 val)
{
	writel(val, priv->ctrl_regs + offset);
}

static u32 nixge_ctrl_read_reg(struct nixge_priv *priv, off_t offset)
{
	return readl(priv->ctrl_regs + offset);
}

#define nixge_ctrl_poll_timeout(priv, addr, val, cond, sleep_us, timeout_us) \
	readl_poll_timeout((priv)->ctrl_regs + (addr), (val), (cond), \
			   (sleep_us), (timeout_us))

#define nixge_dma_poll_timeout(priv, addr, val, cond, sleep_us, timeout_us) \
	readl_poll_timeout((priv)->dma_regs + (addr), (val), (cond), \
			   (sleep_us), (timeout_us))

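/* Unmap and free every Rx buffer, then release the Tx/Rx descriptor
 * rings and the Tx bookkeeping array. Used on teardown and on the
 * error path of nixge_hw_dma_bd_init().
 */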
static void nixge_hw_dma_bd_release(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	dma_addr_t phys_addr;
	struct sk_buff *skb;
	int i;

	for (i = 0; i < RX_BD_NUM; i++) {
		phys_addr = nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[i],
						     phys);

		dma_unmap_single(ndev->dev.parent, phys_addr,
				 NIXGE_MAX_JUMBO_FRAME_SIZE,
				 DMA_FROM_DEVICE);

		skb = (struct sk_buff *)(uintptr_t)
			nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[i],
						 sw_id_offset);
		dev_kfree_skb(skb);
	}

	if (priv->rx_bd_v)
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*priv->rx_bd_v) * RX_BD_NUM,
				  priv->rx_bd_v,
				  priv->rx_bd_p);

	if (priv->tx_skb)
		devm_kfree(ndev->dev.parent, priv->tx_skb);

	if (priv->tx_bd_v)
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*priv->tx_bd_v) * TX_BD_NUM,
				  priv->tx_bd_v,
				  priv->tx_bd_p);
}

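/* Allocate the Tx/Rx descriptor rings, link each into a circular list,
 * pre-map one jumbo-sized Rx buffer per descriptor, then program the
 * DMA control and tail registers so both channels are ready to run.
 */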
static int nixge_hw_dma_bd_init(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct sk_buff *skb;
	dma_addr_t phys;
	u32 cr;
	int i;

	/* Reset the indexes which are used for accessing the BDs */
	priv->tx_bd_ci = 0;
	priv->tx_bd_tail = 0;
	priv->rx_bd_ci = 0;

	/* Allocate the Tx and Rx buffer descriptors. */
	priv->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					   sizeof(*priv->tx_bd_v) * TX_BD_NUM,
					   &priv->tx_bd_p, GFP_KERNEL);
	if (!priv->tx_bd_v)
		goto out;

	priv->tx_skb = devm_kcalloc(ndev->dev.parent,
				    TX_BD_NUM, sizeof(*priv->tx_skb),
				    GFP_KERNEL);
	if (!priv->tx_skb)
		goto out;

	priv->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					   sizeof(*priv->rx_bd_v) * RX_BD_NUM,
					   &priv->rx_bd_p, GFP_KERNEL);
	if (!priv->rx_bd_v)
		goto out;

	for (i = 0; i < TX_BD_NUM; i++) {
		nixge_hw_dma_bd_set_next(&priv->tx_bd_v[i],
					 priv->tx_bd_p +
					 sizeof(*priv->tx_bd_v) *
					 ((i + 1) % TX_BD_NUM));
	}

	for (i = 0; i < RX_BD_NUM; i++) {
		nixge_hw_dma_bd_set_next(&priv->rx_bd_v[i],
					 priv->rx_bd_p +
					 sizeof(*priv->rx_bd_v) *
					 ((i + 1) % RX_BD_NUM));

		skb = netdev_alloc_skb_ip_align(ndev,
						NIXGE_MAX_JUMBO_FRAME_SIZE);
		if (!skb)
			goto out;

		nixge_hw_dma_bd_set_offset(&priv->rx_bd_v[i], (uintptr_t)skb);
		phys = dma_map_single(ndev->dev.parent, skb->data,
				      NIXGE_MAX_JUMBO_FRAME_SIZE,
				      DMA_FROM_DEVICE);

		nixge_hw_dma_bd_set_phys(&priv->rx_bd_v[i], phys);

		priv->rx_bd_v[i].cntrl = NIXGE_MAX_JUMBO_FRAME_SIZE;
	}

	/* Start updating the Rx channel control register */
	cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      ((priv->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Rx channel control register */
	nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);

	/* Start updating the Tx channel control register */
	cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      ((priv->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Tx channel control register */
	nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	nixge_dma_write_desc_reg(priv, XAXIDMA_RX_CDESC_OFFSET, priv->rx_bd_p);
	cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
	nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET,
			    cr | XAXIDMA_CR_RUNSTOP_MASK);
	nixge_dma_write_desc_reg(priv, XAXIDMA_RX_TDESC_OFFSET, priv->rx_bd_p +
				 (sizeof(*priv->rx_bd_v) * (RX_BD_NUM - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * The Tx channel is then ready to run, but it will only start
	 * transmitting once the tail pointer register is written.
	 */
	nixge_dma_write_desc_reg(priv, XAXIDMA_TX_CDESC_OFFSET, priv->tx_bd_p);
	cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
	nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET,
			    cr | XAXIDMA_CR_RUNSTOP_MASK);

	return 0;
out:
	nixge_hw_dma_bd_release(ndev);
	return -ENOMEM;
}

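/* Reset one DMA channel (Tx or Rx) by writing the reset bit of its
 * control register and polling until the core clears it again.
 */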
static void __nixge_device_reset(struct nixge_priv *priv, off_t offset)
{
	u32 status;
	int err;

	/* Reset Axi DMA. This also resets the NIXGE Ethernet core.
	 * The reset process of Axi DMA takes a while to complete as all
	 * pending commands/transfers will be flushed or completed during
	 * this reset process.
	 */
	nixge_dma_write_reg(priv, offset, XAXIDMA_CR_RESET_MASK);
	err = nixge_dma_poll_timeout(priv, offset, status,
				     !(status & XAXIDMA_CR_RESET_MASK), 10,
				     1000);
	if (err)
		netdev_err(priv->ndev, "%s: DMA reset timeout!\n", __func__);
}

static void nixge_device_reset(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);

	__nixge_device_reset(priv, XAXIDMA_TX_CR_OFFSET);
	__nixge_device_reset(priv, XAXIDMA_RX_CR_OFFSET);

	if (nixge_hw_dma_bd_init(ndev))
		netdev_err(ndev, "%s: descriptor allocation failed\n",
			   __func__);

	netif_trans_update(ndev);
}

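/* Called by phylib on link changes: cache the new link/speed/duplex
 * state and log it when anything actually changed.
 */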
static void nixge_handle_link_change(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;

	if (phydev->link != priv->link || phydev->speed != priv->speed ||
	    phydev->duplex != priv->duplex) {
		priv->link = phydev->link;
		priv->speed = phydev->speed;
		priv->duplex = phydev->duplex;
		phy_print_status(phydev);
	}
}

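/* Undo the DMA mapping for one Tx slot (linear head or page fragment)
 * and free the skb if this slot owns it.
 */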
static void nixge_tx_skb_unmap(struct nixge_priv *priv,
			       struct nixge_tx_skb *tx_skb)
{
	if (tx_skb->mapping) {
		if (tx_skb->mapped_as_page)
			dma_unmap_page(priv->ndev->dev.parent, tx_skb->mapping,
				       tx_skb->size, DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->ndev->dev.parent,
					 tx_skb->mapping,
					 tx_skb->size, DMA_TO_DEVICE);
		tx_skb->mapping = 0;
	}

	if (tx_skb->skb) {
		dev_kfree_skb_any(tx_skb->skb);
		tx_skb->skb = NULL;
	}
}

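/* Tx completion: walk the ring from the consumer index, reclaiming
 * every descriptor the hardware has marked complete, then update the
 * stats and wake the queue if anything was freed.
 */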
static void nixge_start_xmit_done(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct nixge_hw_dma_bd *cur_p;
	struct nixge_tx_skb *tx_skb;
	unsigned int status = 0;
	u32 packets = 0;
	u32 size = 0;

	cur_p = &priv->tx_bd_v[priv->tx_bd_ci];
	tx_skb = &priv->tx_skb[priv->tx_bd_ci];

	status = cur_p->status;

	while (status & XAXIDMA_BD_STS_COMPLETE_MASK) {
		nixge_tx_skb_unmap(priv, tx_skb);
		cur_p->status = 0;

		size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
		packets++;

		++priv->tx_bd_ci;
		priv->tx_bd_ci %= TX_BD_NUM;
		cur_p = &priv->tx_bd_v[priv->tx_bd_ci];
		tx_skb = &priv->tx_skb[priv->tx_bd_ci];
		status = cur_p->status;
	}

	ndev->stats.tx_packets += packets;
	ndev->stats.tx_bytes += size;

	if (packets)
		netif_wake_queue(ndev);
}

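/* Check whether the ring has room for a frame with num_frag fragments;
 * returns NETDEV_TX_BUSY if the descriptor that would hold the last
 * fragment is still owned by the hardware.
 */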
static int nixge_check_tx_bd_space(struct nixge_priv *priv,
				   int num_frag)
{
	struct nixge_hw_dma_bd *cur_p;

	cur_p = &priv->tx_bd_v[(priv->tx_bd_tail + num_frag) % TX_BD_NUM];
	if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
		return NETDEV_TX_BUSY;
	return 0;
}

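/* Queue a frame for transmission: map the linear head and each page
 * fragment into its own descriptor, mark SOF on the first and EOF on
 * the last, then kick the DMA engine by writing the new tail pointer.
 * On a fragment mapping failure, unwind every slot mapped so far.
 */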
static netdev_tx_t nixge_start_xmit(struct sk_buff *skb,
				    struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct nixge_hw_dma_bd *cur_p;
	struct nixge_tx_skb *tx_skb;
	dma_addr_t tail_p, cur_phys;
	skb_frag_t *frag;
	u32 num_frag;
	u32 ii;

	num_frag = skb_shinfo(skb)->nr_frags;
	cur_p = &priv->tx_bd_v[priv->tx_bd_tail];
	tx_skb = &priv->tx_skb[priv->tx_bd_tail];

	if (nixge_check_tx_bd_space(priv, num_frag)) {
		if (!netif_queue_stopped(ndev))
			netif_stop_queue(ndev);
		return NETDEV_TX_OK;
	}

	cur_phys = dma_map_single(ndev->dev.parent, skb->data,
				  skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(ndev->dev.parent, cur_phys))
		goto drop;
	nixge_hw_dma_bd_set_phys(cur_p, cur_phys);

	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;

	tx_skb->skb = NULL;
	tx_skb->mapping = cur_phys;
	tx_skb->size = skb_headlen(skb);
	tx_skb->mapped_as_page = false;

	for (ii = 0; ii < num_frag; ii++) {
		++priv->tx_bd_tail;
		priv->tx_bd_tail %= TX_BD_NUM;
		cur_p = &priv->tx_bd_v[priv->tx_bd_tail];
		tx_skb = &priv->tx_skb[priv->tx_bd_tail];
		frag = &skb_shinfo(skb)->frags[ii];

		cur_phys = skb_frag_dma_map(ndev->dev.parent, frag, 0,
					    skb_frag_size(frag),
					    DMA_TO_DEVICE);
		if (dma_mapping_error(ndev->dev.parent, cur_phys))
			goto frag_err;
		nixge_hw_dma_bd_set_phys(cur_p, cur_phys);

		cur_p->cntrl = skb_frag_size(frag);

		tx_skb->skb = NULL;
		tx_skb->mapping = cur_phys;
		tx_skb->size = skb_frag_size(frag);
		tx_skb->mapped_as_page = true;
	}

	/* last buffer of the frame */
	tx_skb->skb = skb;

	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;

	tail_p = priv->tx_bd_p + sizeof(*priv->tx_bd_v) * priv->tx_bd_tail;
	/* Start the transfer */
	nixge_dma_write_desc_reg(priv, XAXIDMA_TX_TDESC_OFFSET, tail_p);
	++priv->tx_bd_tail;
	priv->tx_bd_tail %= TX_BD_NUM;

	return NETDEV_TX_OK;
frag_err:
	for (; ii > 0; ii--) {
		if (priv->tx_bd_tail)
			priv->tx_bd_tail--;
		else
			priv->tx_bd_tail = TX_BD_NUM - 1;

		tx_skb = &priv->tx_skb[priv->tx_bd_tail];
		nixge_tx_skb_unmap(priv, tx_skb);

		cur_p = &priv->tx_bd_v[priv->tx_bd_tail];
		cur_p->status = 0;
	}
	dma_unmap_single(priv->ndev->dev.parent,
			 tx_skb->mapping,
			 tx_skb->size, DMA_TO_DEVICE);
drop:
	ndev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

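/* Receive up to @budget completed frames: hand each skb to the stack
 * via GRO, replenish the descriptor with a freshly mapped buffer and
 * finally bump the Rx tail pointer past the last consumed slot.
 */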
static int nixge_recv(struct net_device *ndev, int budget)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct sk_buff *skb, *new_skb;
	struct nixge_hw_dma_bd *cur_p;
	dma_addr_t tail_p = 0, cur_phys = 0;
	u32 packets = 0;
	u32 length = 0;
	u32 size = 0;

	cur_p = &priv->rx_bd_v[priv->rx_bd_ci];

	while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK &&
		budget > packets)) {
		tail_p = priv->rx_bd_p + sizeof(*priv->rx_bd_v) *
			 priv->rx_bd_ci;

		skb = (struct sk_buff *)(uintptr_t)
			nixge_hw_dma_bd_get_addr(cur_p, sw_id_offset);

		length = cur_p->status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
		if (length > NIXGE_MAX_JUMBO_FRAME_SIZE)
			length = NIXGE_MAX_JUMBO_FRAME_SIZE;

		dma_unmap_single(ndev->dev.parent,
				 nixge_hw_dma_bd_get_addr(cur_p, phys),
				 NIXGE_MAX_JUMBO_FRAME_SIZE,
				 DMA_FROM_DEVICE);

		skb_put(skb, length);

		skb->protocol = eth_type_trans(skb, ndev);
		skb_checksum_none_assert(skb);

		/* For now mark them as CHECKSUM_NONE since
		 * we don't have offload capabilities
		 */
		skb->ip_summed = CHECKSUM_NONE;

		napi_gro_receive(&priv->napi, skb);

		size += length;
		packets++;

		new_skb = netdev_alloc_skb_ip_align(ndev,
						    NIXGE_MAX_JUMBO_FRAME_SIZE);
		if (!new_skb)
			return packets;

		cur_phys = dma_map_single(ndev->dev.parent, new_skb->data,
					  NIXGE_MAX_JUMBO_FRAME_SIZE,
					  DMA_FROM_DEVICE);
		if (dma_mapping_error(ndev->dev.parent, cur_phys)) {
			/* FIXME: bail out and clean up */
			netdev_err(ndev, "Failed to map ...\n");
		}
		nixge_hw_dma_bd_set_phys(cur_p, cur_phys);
		cur_p->cntrl = NIXGE_MAX_JUMBO_FRAME_SIZE;
		cur_p->status = 0;
		nixge_hw_dma_bd_set_offset(cur_p, (uintptr_t)new_skb);

		++priv->rx_bd_ci;
		priv->rx_bd_ci %= RX_BD_NUM;
		cur_p = &priv->rx_bd_v[priv->rx_bd_ci];
	}

	ndev->stats.rx_packets += packets;
	ndev->stats.rx_bytes += size;

	if (tail_p)
		nixge_dma_write_desc_reg(priv, XAXIDMA_RX_TDESC_OFFSET, tail_p);

	return packets;
}

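/* NAPI poll: drain completed Rx work; once under budget, either
 * reschedule if new completions raced in, or re-enable Rx interrupts.
 */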
static int nixge_poll(struct napi_struct *napi, int budget)
{
	struct nixge_priv *priv = container_of(napi, struct nixge_priv, napi);
	int work_done;
	u32 status, cr;

	work_done = nixge_recv(priv->ndev, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		status = nixge_dma_read_reg(priv, XAXIDMA_RX_SR_OFFSET);

		if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
			/* If more work is pending, clear the status
			 * and reschedule
			 */
			nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status);
			napi_reschedule(napi);
		} else {
			/* If not, re-enable the Rx interrupts */
			cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
			cr |= (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
			nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);
		}
	}

	return work_done;
}

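/* Tx interrupt: acknowledge completions and reclaim descriptors; on a
 * DMA error, mask both channels' interrupts and defer recovery to the
 * error tasklet.
 */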
static irqreturn_t nixge_tx_irq(int irq, void *_ndev)
{
	struct nixge_priv *priv = netdev_priv(_ndev);
	struct net_device *ndev = _ndev;
	unsigned int status;
	dma_addr_t phys;
	u32 cr;

	status = nixge_dma_read_reg(priv, XAXIDMA_TX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		nixge_dma_write_reg(priv, XAXIDMA_TX_SR_OFFSET, status);
		nixge_start_xmit_done(priv->ndev);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK)) {
		netdev_err(ndev, "No interrupts asserted in Tx path\n");
		return IRQ_NONE;
	}
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		phys = nixge_hw_dma_bd_get_addr(&priv->tx_bd_v[priv->tx_bd_ci],
						phys);

		netdev_err(ndev, "DMA Tx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%llx\n", (u64)phys);

		cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Tx channel control register */
		nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr);

		cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Rx channel control register */
		nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);

		tasklet_schedule(&priv->dma_err_tasklet);
		nixge_dma_write_reg(priv, XAXIDMA_TX_SR_OFFSET, status);
	}
out:
	return IRQ_HANDLED;
}

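/* Rx interrupt: mask Rx interrupts and schedule NAPI on completions;
 * on a DMA error, mask both channels and defer to the error tasklet.
 */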
static irqreturn_t nixge_rx_irq(int irq, void *_ndev)
{
	struct nixge_priv *priv = netdev_priv(_ndev);
	struct net_device *ndev = _ndev;
	unsigned int status;
	dma_addr_t phys;
	u32 cr;

	status = nixge_dma_read_reg(priv, XAXIDMA_RX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		/* Turn off the IRQs; NAPI takes over from here */
		nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status);
		cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
		nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);

		if (napi_schedule_prep(&priv->napi))
			__napi_schedule(&priv->napi);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK)) {
		netdev_err(ndev, "No interrupts asserted in Rx path\n");
		return IRQ_NONE;
	}
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		phys = nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[priv->rx_bd_ci],
						phys);
		netdev_err(ndev, "DMA Rx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%llx\n", (u64)phys);

		cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Tx channel control register */
		nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr);

		cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Rx channel control register */
		nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);

		tasklet_schedule(&priv->dma_err_tasklet);
		nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status);
	}
out:
	return IRQ_HANDLED;
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) static void nixge_dma_err_handler(struct tasklet_struct *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) struct nixge_priv *lp = from_tasklet(lp, t, dma_err_tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) struct nixge_hw_dma_bd *cur_p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) struct nixge_tx_skb *tx_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) u32 cr, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796)
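/* Recover from a DMA error: reset both channels, unmap and drop any
 * in-flight Tx buffers, clear the rings and restart from index zero.
 */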
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) __nixge_device_reset(lp, XAXIDMA_TX_CR_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) __nixge_device_reset(lp, XAXIDMA_RX_CR_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) for (i = 0; i < TX_BD_NUM; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) cur_p = &lp->tx_bd_v[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) tx_skb = &lp->tx_skb[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) nixge_tx_skb_unmap(lp, tx_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) nixge_hw_dma_bd_set_phys(cur_p, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) cur_p->cntrl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) cur_p->status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) nixge_hw_dma_bd_set_offset(cur_p, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) for (i = 0; i < RX_BD_NUM; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) cur_p = &lp->rx_bd_v[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) cur_p->status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) lp->tx_bd_ci = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) lp->tx_bd_tail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) lp->rx_bd_ci = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) /* Start updating the Rx channel control register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) cr = nixge_dma_read_reg(lp, XAXIDMA_RX_CR_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) /* Update the interrupt coalesce count */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) /* Update the delay timer count */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) cr = ((cr & ~XAXIDMA_DELAY_MASK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) /* Enable coalesce, delay timer and error interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) cr |= XAXIDMA_IRQ_ALL_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) /* Finally write to the Rx channel control register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) nixge_dma_write_reg(lp, XAXIDMA_RX_CR_OFFSET, cr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) /* Start updating the Tx channel control register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) cr = nixge_dma_read_reg(lp, XAXIDMA_TX_CR_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) /* Update the interrupt coalesce count */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) /* Update the delay timer count */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) cr = ((cr & ~XAXIDMA_DELAY_MASK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) /* Enable coalesce, delay timer and error interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) cr |= XAXIDMA_IRQ_ALL_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) /* Finally write to the Tx channel control register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) nixge_dma_write_reg(lp, XAXIDMA_TX_CR_OFFSET, cr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) /* Populate the tail pointer and bring the Rx Axi DMA engine out of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) * halted state. This will make the Rx side ready for reception.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) nixge_dma_write_desc_reg(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) cr = nixge_dma_read_reg(lp, XAXIDMA_RX_CR_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) nixge_dma_write_reg(lp, XAXIDMA_RX_CR_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) cr | XAXIDMA_CR_RUNSTOP_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) nixge_dma_write_desc_reg(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) /* Write to the RS (Run-stop) bit in the Tx channel control register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) * The Tx channel is now ready to run, but it will only start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) * transmitting once the tail pointer register is written.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) nixge_dma_write_desc_reg(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) cr = nixge_dma_read_reg(lp, XAXIDMA_TX_CR_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) nixge_dma_write_reg(lp, XAXIDMA_TX_CR_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) cr | XAXIDMA_CR_RUNSTOP_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) static int nixge_open(struct net_device *ndev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) struct nixge_priv *priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) struct phy_device *phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) nixge_device_reset(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) phy = of_phy_connect(ndev, priv->phy_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) &nixge_handle_link_change, 0, priv->phy_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) if (!phy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) phy_start(phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) /* Enable tasklets for Axi DMA error handling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) tasklet_setup(&priv->dma_err_tasklet, nixge_dma_err_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) napi_enable(&priv->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) /* Enable interrupts for Axi DMA Tx */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) ret = request_irq(priv->tx_irq, nixge_tx_irq, 0, ndev->name, ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) goto err_tx_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) /* Enable interrupts for Axi DMA Rx */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) ret = request_irq(priv->rx_irq, nixge_rx_irq, 0, ndev->name, ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) goto err_rx_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) netif_start_queue(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) err_rx_irq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) free_irq(priv->tx_irq, ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) err_tx_irq:
napi_disable(&priv->napi); /* undo the napi_enable() above */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) phy_stop(phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) phy_disconnect(phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) tasklet_kill(&priv->dma_err_tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) netdev_err(ndev, "request_irq() failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) static int nixge_stop(struct net_device *ndev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) struct nixge_priv *priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) u32 cr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) netif_stop_queue(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) napi_disable(&priv->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) if (ndev->phydev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) phy_stop(ndev->phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) phy_disconnect(ndev->phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
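/* Halt both DMA channels by clearing their run/stop bits. */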
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) cr & (~XAXIDMA_CR_RUNSTOP_MASK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) cr & (~XAXIDMA_CR_RUNSTOP_MASK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) tasklet_kill(&priv->dma_err_tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) free_irq(priv->tx_irq, ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) free_irq(priv->rx_irq, ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) nixge_hw_dma_bd_release(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) static int nixge_change_mtu(struct net_device *ndev, int new_mtu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) if (netif_running(ndev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
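/* The full frame (new MTU plus header and trailer) must still fit
 * within the maximum jumbo frame size.
 */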
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) if ((new_mtu + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE) >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) NIXGE_MAX_JUMBO_FRAME_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) ndev->mtu = new_mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) static s32 __nixge_hw_set_mac_address(struct net_device *ndev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) struct nixge_priv *priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
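/* The MAC address is split across two registers: the lower four octets
 * go into MAC_LSB, the remaining two into MAC_MSB.
 */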
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) nixge_ctrl_write_reg(priv, NIXGE_REG_MAC_LSB,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) (ndev->dev_addr[2]) << 24 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) (ndev->dev_addr[3] << 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) (ndev->dev_addr[4] << 8) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) (ndev->dev_addr[5] << 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) nixge_ctrl_write_reg(priv, NIXGE_REG_MAC_MSB,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) (ndev->dev_addr[1] | (ndev->dev_addr[0] << 8)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) static int nixge_net_set_mac_address(struct net_device *ndev, void *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) err = eth_mac_addr(ndev, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) __nixge_hw_set_mac_address(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) static const struct net_device_ops nixge_netdev_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) .ndo_open = nixge_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) .ndo_stop = nixge_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) .ndo_start_xmit = nixge_start_xmit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) .ndo_change_mtu = nixge_change_mtu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) .ndo_set_mac_address = nixge_net_set_mac_address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) .ndo_validate_addr = eth_validate_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) static void nixge_ethtools_get_drvinfo(struct net_device *ndev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) struct ethtool_drvinfo *ed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) strscpy(ed->driver, "nixge", sizeof(ed->driver));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) strscpy(ed->bus_info, "platform", sizeof(ed->bus_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) static int nixge_ethtools_get_coalesce(struct net_device *ndev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) struct ethtool_coalesce *ecoalesce)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) struct nixge_priv *priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) u32 regval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) regval = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) >> XAXIDMA_COALESCE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) regval = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) >> XAXIDMA_COALESCE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) static int nixge_ethtools_set_coalesce(struct net_device *ndev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) struct ethtool_coalesce *ecoalesce)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) struct nixge_priv *priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) if (netif_running(ndev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) netdev_err(ndev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) "Please stop netif before applying configuration\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) }
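/* Store the new counts; they take effect when the interface is brought
 * up again and the DMA control registers are reprogrammed.
 */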
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) if (ecoalesce->rx_max_coalesced_frames)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) priv->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) if (ecoalesce->tx_max_coalesced_frames)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) priv->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) static int nixge_ethtools_set_phys_id(struct net_device *ndev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) enum ethtool_phys_id_state state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) struct nixge_priv *priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) u32 ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) ctrl = nixge_ctrl_read_reg(priv, NIXGE_REG_LED_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) switch (state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) case ETHTOOL_ID_ACTIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) ctrl |= NIXGE_ID_LED_CTL_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) /* Enable identification LED override */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) return 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) case ETHTOOL_ID_ON:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) ctrl |= NIXGE_ID_LED_CTL_VAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) case ETHTOOL_ID_OFF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) ctrl &= ~NIXGE_ID_LED_CTL_VAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) case ETHTOOL_ID_INACTIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) /* Restore LED settings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) ctrl &= ~NIXGE_ID_LED_CTL_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) static const struct ethtool_ops nixge_ethtool_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) .get_drvinfo = nixge_ethtools_get_drvinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) .get_coalesce = nixge_ethtools_get_coalesce,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) .set_coalesce = nixge_ethtools_set_coalesce,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) .set_phys_id = nixge_ethtools_set_phys_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) .get_link_ksettings = phy_ethtool_get_link_ksettings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) .set_link_ksettings = phy_ethtool_set_link_ksettings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) .get_link = ethtool_op_get_link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) static int nixge_mdio_read(struct mii_bus *bus, int phy_id, int reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) struct nixge_priv *priv = bus->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) u32 status, tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) u16 device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) if (reg & MII_ADDR_C45) {
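/* Clause 45: latch the target register with an ADDRESS operation
 * before issuing the actual READ.
 */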
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) device = (reg >> 16) & 0x1f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_ADDR, reg & 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_OP_ADDRESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) | NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) !status, 10, 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) dev_err(priv->dev, "timeout setting address\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_C45_READ) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) device = reg & 0x1f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) tmp = NIXGE_MDIO_CLAUSE22 | NIXGE_MDIO_OP(NIXGE_MDIO_C22_READ) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) !status, 10, 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) dev_err(priv->dev, "timeout setting read command\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) status = nixge_ctrl_read_reg(priv, NIXGE_REG_MDIO_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) static int nixge_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) struct nixge_priv *priv = bus->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) u32 status, tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) u16 device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) if (reg & MII_ADDR_C45) {
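/* As in the read path, Clause 45 requires an ADDRESS cycle before
 * the WRITE operation.
 */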
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) device = (reg >> 16) & 0x1f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_ADDR, reg & 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_OP_ADDRESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) | NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) !status, 10, 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) dev_err(priv->dev, "timeout setting address\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_C45_WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) | NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_DATA, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) !status, 10, 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) dev_err(priv->dev, "timeout setting write command\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) device = reg & 0x1f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) tmp = NIXGE_MDIO_CLAUSE22 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) NIXGE_MDIO_OP(NIXGE_MDIO_C22_WRITE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_DATA, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) !status, 10, 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) dev_err(priv->dev, "timeout setting write command\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) static int nixge_mdio_setup(struct nixge_priv *priv, struct device_node *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) struct mii_bus *bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) bus = devm_mdiobus_alloc(priv->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) if (!bus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(priv->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) bus->priv = priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) bus->name = "nixge_mii_bus";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) bus->read = nixge_mdio_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) bus->write = nixge_mdio_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) bus->parent = priv->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) priv->mii_bus = bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) return of_mdiobus_register(bus, np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
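/* Read the MAC address from the "address" nvmem cell, if one is wired
 * up. Returns a kmalloc'ed buffer that the caller must kfree(), or
 * NULL if the cell is unavailable.
 */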
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) static void *nixge_get_nvmem_address(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) struct nvmem_cell *cell;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) size_t cell_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) char *mac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) cell = nvmem_cell_get(dev, "address");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) if (IS_ERR(cell))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) mac = nvmem_cell_read(cell, &cell_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) nvmem_cell_put(cell);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) return mac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) /* Match table for of_platform binding */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) static const struct of_device_id nixge_dt_ids[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) { .compatible = "ni,xge-enet-2.00", .data = (void *)NIXGE_V2 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) { .compatible = "ni,xge-enet-3.00", .data = (void *)NIXGE_V3 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) {},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) MODULE_DEVICE_TABLE(of, nixge_dt_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) static int nixge_of_get_resources(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) const struct of_device_id *of_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) enum nixge_version version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) struct resource *ctrlres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) struct resource *dmares;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) struct net_device *ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) struct nixge_priv *priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) ndev = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) of_id = of_match_node(nixge_dt_ids, pdev->dev.of_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) if (!of_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) version = (enum nixge_version)of_id->data;
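/* V2 exposes one register block with the control registers at a fixed
 * offset; V3 provides separate "dma" and "ctrl" memory resources.
 */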
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) if (version <= NIXGE_V2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) dmares = platform_get_resource(pdev, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) dmares = platform_get_resource_byname(pdev, IORESOURCE_MEM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) "dma");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) priv->dma_regs = devm_ioremap_resource(&pdev->dev, dmares);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) if (IS_ERR(priv->dma_regs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) netdev_err(ndev, "failed to map dma regs\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) return PTR_ERR(priv->dma_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) if (version <= NIXGE_V2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) priv->ctrl_regs = priv->dma_regs + NIXGE_REG_CTRL_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) ctrlres = platform_get_resource_byname(pdev, IORESOURCE_MEM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) "ctrl");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) priv->ctrl_regs = devm_ioremap_resource(&pdev->dev, ctrlres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) if (IS_ERR(priv->ctrl_regs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) netdev_err(ndev, "failed to map ctrl regs\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) return PTR_ERR(priv->ctrl_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) static int nixge_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) struct device_node *mn, *phy_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) struct nixge_priv *priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) struct net_device *ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) const u8 *mac_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) ndev = alloc_etherdev(sizeof(*priv));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) if (!ndev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) platform_set_drvdata(pdev, ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) SET_NETDEV_DEV(ndev, &pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) ndev->features = NETIF_F_SG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) ndev->netdev_ops = &nixge_netdev_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) ndev->ethtool_ops = &nixge_ethtool_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) /* MTU range: 64 - 9000 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) ndev->min_mtu = 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) ndev->max_mtu = NIXGE_JUMBO_MTU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) mac_addr = nixge_get_nvmem_address(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) if (mac_addr && is_valid_ether_addr(mac_addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) ether_addr_copy(ndev->dev_addr, mac_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) kfree(mac_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) eth_hw_addr_random(ndev);
kfree(mac_addr); /* frees a present-but-invalid buffer; kfree(NULL) is a no-op */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) priv->ndev = ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) priv->dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) netif_napi_add(ndev, &priv->napi, nixge_poll, NAPI_POLL_WEIGHT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) err = nixge_of_get_resources(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) goto free_netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) __nixge_hw_set_mac_address(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) priv->tx_irq = platform_get_irq_byname(pdev, "tx");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) if (priv->tx_irq < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) netdev_err(ndev, "could not find 'tx' irq\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) err = priv->tx_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) goto free_netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) priv->rx_irq = platform_get_irq_byname(pdev, "rx");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) if (priv->rx_irq < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) netdev_err(ndev, "could not find 'rx' irq\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) err = priv->rx_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) goto free_netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) priv->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) priv->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) mn = of_get_child_by_name(pdev->dev.of_node, "mdio");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) if (mn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) err = nixge_mdio_setup(priv, mn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) of_node_put(mn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) netdev_err(ndev, "error registering mdio bus\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) goto free_netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) err = of_get_phy_mode(pdev->dev.of_node, &priv->phy_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) netdev_err(ndev, "could not find \"phy-mode\" property\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) goto unregister_mdio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
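/* Without a phy-handle, fall back to a fixed-link description and
 * treat our own node as the PHY node.
 */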
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) if (!phy_node && of_phy_is_fixed_link(pdev->dev.of_node)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) err = of_phy_register_fixed_link(pdev->dev.of_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) netdev_err(ndev, "broken fixed-link specification\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) goto unregister_mdio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) phy_node = of_node_get(pdev->dev.of_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) priv->phy_node = phy_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) err = register_netdev(priv->ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) netdev_err(ndev, "register_netdev() error (%i)\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) goto free_phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) free_phy:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) if (of_phy_is_fixed_link(pdev->dev.of_node))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) of_phy_deregister_fixed_link(pdev->dev.of_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) of_node_put(phy_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) unregister_mdio:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) if (priv->mii_bus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) mdiobus_unregister(priv->mii_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) free_netdev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) free_netdev(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) static int nixge_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) struct net_device *ndev = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) struct nixge_priv *priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) unregister_netdev(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) if (of_phy_is_fixed_link(pdev->dev.of_node))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) of_phy_deregister_fixed_link(pdev->dev.of_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) of_node_put(priv->phy_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) if (priv->mii_bus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) mdiobus_unregister(priv->mii_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) free_netdev(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) static struct platform_driver nixge_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) .probe = nixge_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) .remove = nixge_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) .name = "nixge",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) .of_match_table = of_match_ptr(nixge_dt_ids),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) module_platform_driver(nixge_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) MODULE_LICENSE("GPL v2");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) MODULE_DESCRIPTION("National Instruments XGE Management MAC");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) MODULE_AUTHOR("Moritz Fischer <mdf@kernel.org>");