// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2010-2011 Calxeda, Inc.
 */
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/circ_buf.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/crc32.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

/* XGMAC Register definitions */
#define XGMAC_CONTROL 0x00000000 /* MAC Configuration */
#define XGMAC_FRAME_FILTER 0x00000004 /* MAC Frame Filter */
#define XGMAC_FLOW_CTRL 0x00000018 /* MAC Flow Control */
#define XGMAC_VLAN_TAG 0x0000001C /* VLAN Tags */
#define XGMAC_VERSION 0x00000020 /* Version */
#define XGMAC_VLAN_INCL 0x00000024 /* VLAN tag for tx frames */
#define XGMAC_LPI_CTRL 0x00000028 /* LPI Control and Status */
#define XGMAC_LPI_TIMER 0x0000002C /* LPI Timers Control */
#define XGMAC_TX_PACE 0x00000030 /* Transmit Pace and Stretch */
#define XGMAC_VLAN_HASH 0x00000034 /* VLAN Hash Table */
#define XGMAC_DEBUG 0x00000038 /* Debug */
#define XGMAC_INT_STAT 0x0000003C /* Interrupt and Control */
#define XGMAC_ADDR_HIGH(reg) (0x00000040 + ((reg) * 8))
#define XGMAC_ADDR_LOW(reg) (0x00000044 + ((reg) * 8))
#define XGMAC_HASH(n) (0x00000300 + (n) * 4) /* HASH table regs */
#define XGMAC_NUM_HASH 16
#define XGMAC_OMR 0x00000400
#define XGMAC_REMOTE_WAKE 0x00000700 /* Remote Wake-Up Frm Filter */
#define XGMAC_PMT 0x00000704 /* PMT Control and Status */
#define XGMAC_MMC_CTRL 0x00000800 /* XGMAC MMC Control */
#define XGMAC_MMC_INTR_RX 0x00000804 /* Receive Interrupt */
#define XGMAC_MMC_INTR_TX 0x00000808 /* Transmit Interrupt */
#define XGMAC_MMC_INTR_MASK_RX 0x0000080c /* Receive Interrupt Mask */
#define XGMAC_MMC_INTR_MASK_TX 0x00000810 /* Transmit Interrupt Mask */

/* Hardware TX Statistics Counters */
#define XGMAC_MMC_TXOCTET_GB_LO 0x00000814
#define XGMAC_MMC_TXOCTET_GB_HI 0x00000818
#define XGMAC_MMC_TXFRAME_GB_LO 0x0000081C
#define XGMAC_MMC_TXFRAME_GB_HI 0x00000820
#define XGMAC_MMC_TXBCFRAME_G 0x00000824
#define XGMAC_MMC_TXMCFRAME_G 0x0000082C
#define XGMAC_MMC_TXUCFRAME_GB 0x00000864
#define XGMAC_MMC_TXMCFRAME_GB 0x0000086C
#define XGMAC_MMC_TXBCFRAME_GB 0x00000874
#define XGMAC_MMC_TXUNDERFLOW 0x0000087C
#define XGMAC_MMC_TXOCTET_G_LO 0x00000884
#define XGMAC_MMC_TXOCTET_G_HI 0x00000888
#define XGMAC_MMC_TXFRAME_G_LO 0x0000088C
#define XGMAC_MMC_TXFRAME_G_HI 0x00000890
#define XGMAC_MMC_TXPAUSEFRAME 0x00000894
#define XGMAC_MMC_TXVLANFRAME 0x0000089C

/* Hardware RX Statistics Counters */
#define XGMAC_MMC_RXFRAME_GB_LO 0x00000900
#define XGMAC_MMC_RXFRAME_GB_HI 0x00000904
#define XGMAC_MMC_RXOCTET_GB_LO 0x00000908
#define XGMAC_MMC_RXOCTET_GB_HI 0x0000090C
#define XGMAC_MMC_RXOCTET_G_LO 0x00000910
#define XGMAC_MMC_RXOCTET_G_HI 0x00000914
#define XGMAC_MMC_RXBCFRAME_G 0x00000918
#define XGMAC_MMC_RXMCFRAME_G 0x00000920
#define XGMAC_MMC_RXCRCERR 0x00000928
#define XGMAC_MMC_RXRUNT 0x00000930
#define XGMAC_MMC_RXJABBER 0x00000934
#define XGMAC_MMC_RXUCFRAME_G 0x00000970
#define XGMAC_MMC_RXLENGTHERR 0x00000978
#define XGMAC_MMC_RXPAUSEFRAME 0x00000988
#define XGMAC_MMC_RXOVERFLOW 0x00000990
#define XGMAC_MMC_RXVLANFRAME 0x00000998
#define XGMAC_MMC_RXWATCHDOG 0x000009a0

/* DMA Control and Status Registers */
#define XGMAC_DMA_BUS_MODE 0x00000f00 /* Bus Mode */
#define XGMAC_DMA_TX_POLL 0x00000f04 /* Transmit Poll Demand */
#define XGMAC_DMA_RX_POLL 0x00000f08 /* Receive Poll Demand */
#define XGMAC_DMA_RX_BASE_ADDR 0x00000f0c /* Receive List Base */
#define XGMAC_DMA_TX_BASE_ADDR 0x00000f10 /* Transmit List Base */
#define XGMAC_DMA_STATUS 0x00000f14 /* Status Register */
#define XGMAC_DMA_CONTROL 0x00000f18 /* Ctrl (Operational Mode) */
#define XGMAC_DMA_INTR_ENA 0x00000f1c /* Interrupt Enable */
#define XGMAC_DMA_MISS_FRAME_CTR 0x00000f20 /* Missed Frame Counter */
#define XGMAC_DMA_RI_WDOG_TIMER 0x00000f24 /* RX Intr Watchdog Timer */
#define XGMAC_DMA_AXI_BUS 0x00000f28 /* AXI Bus Mode */
#define XGMAC_DMA_AXI_STATUS 0x00000f2C /* AXI Status */
#define XGMAC_DMA_HW_FEATURE 0x00000f58 /* Enabled Hardware Features */

#define XGMAC_ADDR_AE 0x80000000 /* Address Enable (in ADDR_HIGH) */

/* PMT Control and Status */
#define XGMAC_PMT_POINTER_RESET 0x80000000
#define XGMAC_PMT_GLBL_UNICAST 0x00000200
#define XGMAC_PMT_WAKEUP_RX_FRM 0x00000040
#define XGMAC_PMT_MAGIC_PKT 0x00000020
#define XGMAC_PMT_WAKEUP_FRM_EN 0x00000004
#define XGMAC_PMT_MAGIC_PKT_EN 0x00000002
#define XGMAC_PMT_POWERDOWN 0x00000001

#define XGMAC_CONTROL_SPD 0x40000000 /* Speed control */
#define XGMAC_CONTROL_SPD_MASK 0x60000000
#define XGMAC_CONTROL_SPD_1G 0x60000000
#define XGMAC_CONTROL_SPD_2_5G 0x40000000
#define XGMAC_CONTROL_SPD_10G 0x00000000
#define XGMAC_CONTROL_SARC 0x10000000 /* Source Addr Insert/Replace */
#define XGMAC_CONTROL_SARK_MASK 0x18000000
#define XGMAC_CONTROL_CAR 0x04000000 /* CRC Addition/Replacement */
#define XGMAC_CONTROL_CAR_MASK 0x06000000
#define XGMAC_CONTROL_DP 0x01000000 /* Disable Padding */
#define XGMAC_CONTROL_WD 0x00800000 /* Disable Watchdog on rx */
#define XGMAC_CONTROL_JD 0x00400000 /* Jabber disable */
#define XGMAC_CONTROL_JE 0x00100000 /* Jumbo frame */
#define XGMAC_CONTROL_LM 0x00001000 /* Loop-back mode */
#define XGMAC_CONTROL_IPC 0x00000400 /* Checksum Offload */
#define XGMAC_CONTROL_ACS 0x00000080 /* Automatic Pad/FCS Strip */
#define XGMAC_CONTROL_DDIC 0x00000010 /* Disable Deficit Idle Count */
#define XGMAC_CONTROL_TE 0x00000008 /* Transmitter Enable */
#define XGMAC_CONTROL_RE 0x00000004 /* Receiver Enable */

/* XGMAC Frame Filter defines */
#define XGMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */
#define XGMAC_FRAME_FILTER_HUC 0x00000002 /* Hash Unicast */
#define XGMAC_FRAME_FILTER_HMC 0x00000004 /* Hash Multicast */
#define XGMAC_FRAME_FILTER_DAIF 0x00000008 /* DA Inverse Filtering */
#define XGMAC_FRAME_FILTER_PM 0x00000010 /* Pass all multicast */
#define XGMAC_FRAME_FILTER_DBF 0x00000020 /* Disable Broadcast frames */
#define XGMAC_FRAME_FILTER_SAIF 0x00000100 /* SA Inverse Filtering */
#define XGMAC_FRAME_FILTER_SAF 0x00000200 /* Source Address Filter */
#define XGMAC_FRAME_FILTER_HPF 0x00000400 /* Hash or perfect Filter */
#define XGMAC_FRAME_FILTER_VHF 0x00000800 /* VLAN Hash Filter */
#define XGMAC_FRAME_FILTER_VPF 0x00001000 /* VLAN Perfect Filter */
#define XGMAC_FRAME_FILTER_RA 0x80000000 /* Receive all mode */

/* XGMAC FLOW CTRL defines */
#define XGMAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */
#define XGMAC_FLOW_CTRL_PT_SHIFT 16
#define XGMAC_FLOW_CTRL_DZQP 0x00000080 /* Disable Zero-Quanta Phase */
#define XGMAC_FLOW_CTRL_PLT 0x00000020 /* Pause Low Threshold */
#define XGMAC_FLOW_CTRL_PLT_MASK 0x00000030 /* PLT MASK */
#define XGMAC_FLOW_CTRL_UP 0x00000008 /* Unicast Pause Frame Detect */
#define XGMAC_FLOW_CTRL_RFE 0x00000004 /* Rx Flow Control Enable */
#define XGMAC_FLOW_CTRL_TFE 0x00000002 /* Tx Flow Control Enable */
#define XGMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... */

/* XGMAC_INT_STAT reg */
#define XGMAC_INT_STAT_PMTIM 0x00800000 /* PMT Interrupt Mask */
#define XGMAC_INT_STAT_PMT 0x0080 /* PMT Interrupt Status */
#define XGMAC_INT_STAT_LPI 0x0040 /* LPI Interrupt Status */

/* DMA Bus Mode register defines */
#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */
#define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */
#define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */
#define DMA_BUS_MODE_ATDS 0x00000080 /* Alternate Descriptor Size */

/* Programmable burst length */
#define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */
#define DMA_BUS_MODE_PBL_SHIFT 8
#define DMA_BUS_MODE_FB 0x00010000 /* Fixed burst */
#define DMA_BUS_MODE_RPBL_MASK 0x003e0000 /* Rx-Programmable Burst Len */
#define DMA_BUS_MODE_RPBL_SHIFT 17
#define DMA_BUS_MODE_USP 0x00800000
#define DMA_BUS_MODE_8PBL 0x01000000
#define DMA_BUS_MODE_AAL 0x02000000

/* DMA Bus Mode register defines */
#define DMA_BUS_PR_RATIO_MASK 0x0000c000 /* Rx/Tx priority ratio */
#define DMA_BUS_PR_RATIO_SHIFT 14
#define DMA_BUS_FB 0x00010000 /* Fixed Burst */

/* DMA Control register defines */
#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */
#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */
#define DMA_CONTROL_DFF 0x01000000 /* Disable flush of rx frames */
#define DMA_CONTROL_OSF 0x00000004 /* Operate on 2nd tx frame */

/* DMA Normal interrupt */
#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */
#define DMA_INTR_ENA_AIE 0x00008000 /* Abnormal Summary */
#define DMA_INTR_ENA_ERE 0x00004000 /* Early Receive */
#define DMA_INTR_ENA_FBE 0x00002000 /* Fatal Bus Error */
#define DMA_INTR_ENA_ETE 0x00000400 /* Early Transmit */
#define DMA_INTR_ENA_RWE 0x00000200 /* Receive Watchdog */
#define DMA_INTR_ENA_RSE 0x00000100 /* Receive Stopped */
#define DMA_INTR_ENA_RUE 0x00000080 /* Receive Buffer Unavailable */
#define DMA_INTR_ENA_RIE 0x00000040 /* Receive Interrupt */
#define DMA_INTR_ENA_UNE 0x00000020 /* Tx Underflow */
#define DMA_INTR_ENA_OVE 0x00000010 /* Receive Overflow */
#define DMA_INTR_ENA_TJE 0x00000008 /* Transmit Jabber */
#define DMA_INTR_ENA_TUE 0x00000004 /* Transmit Buffer Unavail */
#define DMA_INTR_ENA_TSE 0x00000002 /* Transmit Stopped */
#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */

#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
                         DMA_INTR_ENA_TUE | DMA_INTR_ENA_TIE)

#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
                           DMA_INTR_ENA_RWE | DMA_INTR_ENA_RSE | \
                           DMA_INTR_ENA_RUE | DMA_INTR_ENA_UNE | \
                           DMA_INTR_ENA_OVE | DMA_INTR_ENA_TJE | \
                           DMA_INTR_ENA_TSE)

/* DMA default interrupt mask */
#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)

/* DMA Status register defines */
#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */
#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int */
#define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */
#define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */
#define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */
#define DMA_STATUS_TS_MASK 0x00700000 /* Transmit Process State */
#define DMA_STATUS_TS_SHIFT 20
#define DMA_STATUS_RS_MASK 0x000e0000 /* Receive Process State */
#define DMA_STATUS_RS_SHIFT 17
#define DMA_STATUS_NIS 0x00010000 /* Normal Interrupt Summary */
#define DMA_STATUS_AIS 0x00008000 /* Abnormal Interrupt Summary */
#define DMA_STATUS_ERI 0x00004000 /* Early Receive Interrupt */
#define DMA_STATUS_FBI 0x00002000 /* Fatal Bus Error Interrupt */
#define DMA_STATUS_ETI 0x00000400 /* Early Transmit Interrupt */
#define DMA_STATUS_RWT 0x00000200 /* Receive Watchdog Timeout */
#define DMA_STATUS_RPS 0x00000100 /* Receive Process Stopped */
#define DMA_STATUS_RU 0x00000080 /* Receive Buffer Unavailable */
#define DMA_STATUS_RI 0x00000040 /* Receive Interrupt */
#define DMA_STATUS_UNF 0x00000020 /* Transmit Underflow */
#define DMA_STATUS_OVF 0x00000010 /* Receive Overflow */
#define DMA_STATUS_TJT 0x00000008 /* Transmit Jabber Timeout */
#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavail */
#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */
#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */

/* Common MAC defines */
#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */
#define MAC_ENABLE_RX 0x00000004 /* Receiver Enable */

/* XGMAC Operation Mode Register */
#define XGMAC_OMR_TSF 0x00200000 /* TX FIFO Store and Forward */
#define XGMAC_OMR_FTF 0x00100000 /* Flush Transmit FIFO */
#define XGMAC_OMR_TTC 0x00020000 /* Transmit Threshold Ctrl */
#define XGMAC_OMR_TTC_MASK 0x00030000
#define XGMAC_OMR_RFD 0x00006000 /* FC Deactivation Threshold */
#define XGMAC_OMR_RFD_MASK 0x00007000 /* FC Deact Threshold MASK */
#define XGMAC_OMR_RFA 0x00000600 /* FC Activation Threshold */
#define XGMAC_OMR_RFA_MASK 0x00000E00 /* FC Act Threshold MASK */
#define XGMAC_OMR_EFC 0x00000100 /* Enable Hardware FC */
#define XGMAC_OMR_FEF 0x00000080 /* Forward Error Frames */
#define XGMAC_OMR_DT 0x00000040 /* Drop TCP/IP csum Errors */
#define XGMAC_OMR_RSF 0x00000020 /* RX FIFO Store and Forward */
#define XGMAC_OMR_RTC_256 0x00000018 /* RX Threshold Ctrl */
#define XGMAC_OMR_RTC_MASK 0x00000018 /* RX Threshold Ctrl MASK */

/* XGMAC HW Features Register */
#define DMA_HW_FEAT_TXCOESEL 0x00010000 /* TX Checksum offload */

#define XGMAC_MMC_CTRL_CNT_FRZ 0x00000008

/* XGMAC Descriptor Defines */
#define MAX_DESC_BUF_SZ (0x2000 - 8)

#define RXDESC_EXT_STATUS 0x00000001
#define RXDESC_CRC_ERR 0x00000002
#define RXDESC_RX_ERR 0x00000008
#define RXDESC_RX_WDOG 0x00000010
#define RXDESC_FRAME_TYPE 0x00000020
#define RXDESC_GIANT_FRAME 0x00000080
#define RXDESC_LAST_SEG 0x00000100
#define RXDESC_FIRST_SEG 0x00000200
#define RXDESC_VLAN_FRAME 0x00000400
#define RXDESC_OVERFLOW_ERR 0x00000800
#define RXDESC_LENGTH_ERR 0x00001000
#define RXDESC_SA_FILTER_FAIL 0x00002000
#define RXDESC_DESCRIPTOR_ERR 0x00004000
#define RXDESC_ERROR_SUMMARY 0x00008000
#define RXDESC_FRAME_LEN_OFFSET 16
#define RXDESC_FRAME_LEN_MASK 0x3fff0000
#define RXDESC_DA_FILTER_FAIL 0x40000000

#define RXDESC1_END_RING 0x00008000

#define RXDESC_IP_PAYLOAD_MASK 0x00000003
#define RXDESC_IP_PAYLOAD_UDP 0x00000001
#define RXDESC_IP_PAYLOAD_TCP 0x00000002
#define RXDESC_IP_PAYLOAD_ICMP 0x00000003
#define RXDESC_IP_HEADER_ERR 0x00000008
#define RXDESC_IP_PAYLOAD_ERR 0x00000010
#define RXDESC_IPV4_PACKET 0x00000040
#define RXDESC_IPV6_PACKET 0x00000080
#define TXDESC_UNDERFLOW_ERR 0x00000001
#define TXDESC_JABBER_TIMEOUT 0x00000002
#define TXDESC_LOCAL_FAULT 0x00000004
#define TXDESC_REMOTE_FAULT 0x00000008
#define TXDESC_VLAN_FRAME 0x00000010
#define TXDESC_FRAME_FLUSHED 0x00000020
#define TXDESC_IP_HEADER_ERR 0x00000040
#define TXDESC_PAYLOAD_CSUM_ERR 0x00000080
#define TXDESC_ERROR_SUMMARY 0x00008000
#define TXDESC_SA_CTRL_INSERT 0x00040000
#define TXDESC_SA_CTRL_REPLACE 0x00080000
#define TXDESC_2ND_ADDR_CHAINED 0x00100000
#define TXDESC_END_RING 0x00200000
#define TXDESC_CSUM_IP 0x00400000
#define TXDESC_CSUM_IP_PAYLD 0x00800000
#define TXDESC_CSUM_ALL 0x00C00000
#define TXDESC_CRC_EN_REPLACE 0x01000000
#define TXDESC_CRC_EN_APPEND 0x02000000
#define TXDESC_DISABLE_PAD 0x04000000
#define TXDESC_FIRST_SEG 0x10000000
#define TXDESC_LAST_SEG 0x20000000
#define TXDESC_INTERRUPT 0x40000000

#define DESC_OWN 0x80000000
#define DESC_BUFFER1_SZ_MASK 0x00001fff
#define DESC_BUFFER2_SZ_MASK 0x1fff0000
#define DESC_BUFFER2_SZ_OFFSET 16

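/*
 * Ring descriptor layout. Each descriptor can address two buffers of up
 * to MAX_DESC_BUF_SZ bytes each; the two lengths are packed into the
 * buf_size word through DESC_BUFFER1_SZ_MASK and DESC_BUFFER2_SZ_MASK,
 * which lets a single descriptor carry a frame larger than one buffer.
 */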
struct xgmac_dma_desc {
        __le32 flags;
        __le32 buf_size;
        __le32 buf1_addr; /* Buffer 1 Address Pointer */
        __le32 buf2_addr; /* Buffer 2 Address Pointer */
        __le32 ext_status;
        __le32 res[3];
};

struct xgmac_extra_stats {
        /* Transmit errors */
        unsigned long tx_jabber;
        unsigned long tx_frame_flushed;
        unsigned long tx_payload_error;
        unsigned long tx_ip_header_error;
        unsigned long tx_local_fault;
        unsigned long tx_remote_fault;
        /* Receive errors */
        unsigned long rx_watchdog;
        unsigned long rx_da_filter_fail;
        unsigned long rx_payload_error;
        unsigned long rx_ip_header_error;
        /* Tx/Rx IRQ errors */
        unsigned long tx_process_stopped;
        unsigned long rx_buf_unav;
        unsigned long rx_process_stopped;
        unsigned long tx_early;
        unsigned long fatal_bus_error;
};

struct xgmac_priv {
        struct xgmac_dma_desc *dma_rx;
        struct sk_buff **rx_skbuff;
        unsigned int rx_tail;
        unsigned int rx_head;

        struct xgmac_dma_desc *dma_tx;
        struct sk_buff **tx_skbuff;
        unsigned int tx_head;
        unsigned int tx_tail;
        int tx_irq_cnt;

        void __iomem *base;
        unsigned int dma_buf_sz;
        dma_addr_t dma_rx_phy;
        dma_addr_t dma_tx_phy;

        struct net_device *dev;
        struct device *device;
        struct napi_struct napi;

        int max_macs;
        struct xgmac_extra_stats xstats;

        spinlock_t stats_lock;
        int pmt_irq;
        char rx_pause;
        char tx_pause;
        int wolopts;
        struct work_struct tx_timeout_work;
};

/* XGMAC Configuration Settings */
#define XGMAC_MAX_MTU 9000
#define PAUSE_TIME 0x400

#define DMA_RX_RING_SZ 256
#define DMA_TX_RING_SZ 128
/* minimum number of free TX descriptors required to wake up TX process */
#define TX_THRESH (DMA_TX_RING_SZ/4)

/* DMA descriptor ring helpers */
#define dma_ring_incr(n, s) (((n) + 1) & ((s) - 1))
#define dma_ring_space(h, t, s) CIRC_SPACE(h, t, s)
#define dma_ring_cnt(h, t, s) CIRC_CNT(h, t, s)
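/*
 * Note: dma_ring_incr() wraps with a mask, and CIRC_SPACE()/CIRC_CNT()
 * make the same assumption, so these helpers are only correct for
 * power-of-two ring sizes; DMA_RX_RING_SZ (256) and DMA_TX_RING_SZ (128)
 * both qualify.
 */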

#define tx_dma_ring_space(p) \
        dma_ring_space((p)->tx_head, (p)->tx_tail, DMA_TX_RING_SZ)

/* XGMAC Descriptor Access Helpers */
static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz)
{
        if (buf_sz > MAX_DESC_BUF_SZ)
                p->buf_size = cpu_to_le32(MAX_DESC_BUF_SZ |
                        (buf_sz - MAX_DESC_BUF_SZ) << DESC_BUFFER2_SZ_OFFSET);
        else
                p->buf_size = cpu_to_le32(buf_sz);
}

static inline int desc_get_buf_len(struct xgmac_dma_desc *p)
{
        u32 len = le32_to_cpu(p->buf_size);
        return (len & DESC_BUFFER1_SZ_MASK) +
                ((len & DESC_BUFFER2_SZ_MASK) >> DESC_BUFFER2_SZ_OFFSET);
}

static inline void desc_init_rx_desc(struct xgmac_dma_desc *p, int ring_size,
                                     int buf_sz)
{
        struct xgmac_dma_desc *end = p + ring_size - 1;

        memset(p, 0, sizeof(*p) * ring_size);

        for (; p <= end; p++)
                desc_set_buf_len(p, buf_sz);

        end->buf_size |= cpu_to_le32(RXDESC1_END_RING);
}

static inline void desc_init_tx_desc(struct xgmac_dma_desc *p, u32 ring_size)
{
        memset(p, 0, sizeof(*p) * ring_size);
        p[ring_size - 1].flags = cpu_to_le32(TXDESC_END_RING);
}

static inline int desc_get_owner(struct xgmac_dma_desc *p)
{
        return le32_to_cpu(p->flags) & DESC_OWN;
}

static inline void desc_set_rx_owner(struct xgmac_dma_desc *p)
{
        /* Clear all fields and set the owner */
        p->flags = cpu_to_le32(DESC_OWN);
}

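/*
 * Hand a tx descriptor back to the hardware: preserve only the
 * end-of-ring marker so the ring geometry survives descriptor reuse,
 * then set the requested flags along with DESC_OWN.
 */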
static inline void desc_set_tx_owner(struct xgmac_dma_desc *p, u32 flags)
{
        u32 tmpflags = le32_to_cpu(p->flags);
        tmpflags &= TXDESC_END_RING;
        tmpflags |= flags | DESC_OWN;
        p->flags = cpu_to_le32(tmpflags);
}

static inline void desc_clear_tx_owner(struct xgmac_dma_desc *p)
{
        u32 tmpflags = le32_to_cpu(p->flags);
        tmpflags &= TXDESC_END_RING;
        p->flags = cpu_to_le32(tmpflags);
}

static inline int desc_get_tx_ls(struct xgmac_dma_desc *p)
{
        return le32_to_cpu(p->flags) & TXDESC_LAST_SEG;
}

static inline int desc_get_tx_fs(struct xgmac_dma_desc *p)
{
        return le32_to_cpu(p->flags) & TXDESC_FIRST_SEG;
}

static inline u32 desc_get_buf_addr(struct xgmac_dma_desc *p)
{
        return le32_to_cpu(p->buf1_addr);
}

static inline void desc_set_buf_addr(struct xgmac_dma_desc *p,
                                     u32 paddr, int len)
{
        p->buf1_addr = cpu_to_le32(paddr);
        if (len > MAX_DESC_BUF_SZ)
                p->buf2_addr = cpu_to_le32(paddr + MAX_DESC_BUF_SZ);
}

static inline void desc_set_buf_addr_and_size(struct xgmac_dma_desc *p,
                                              u32 paddr, int len)
{
        desc_set_buf_len(p, len);
        desc_set_buf_addr(p, paddr, len);
}

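/*
 * Frame length as reported in the descriptor. For Ethernet type frames
 * (RXDESC_FRAME_TYPE set) the hardware count evidently includes the FCS,
 * so ETH_FCS_LEN is subtracted before the length is handed to the stack.
 */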
static inline int desc_get_rx_frame_len(struct xgmac_dma_desc *p)
{
        u32 data = le32_to_cpu(p->flags);
        u32 len = (data & RXDESC_FRAME_LEN_MASK) >> RXDESC_FRAME_LEN_OFFSET;
        if (data & RXDESC_FRAME_TYPE)
                len -= ETH_FCS_LEN;

        return len;
}

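/*
 * Flush the transmit FIFO by setting OMR.FTF, then busy-wait (up to
 * roughly 1ms) for the bit to self-clear once the flush has completed.
 */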
static void xgmac_dma_flush_tx_fifo(void __iomem *ioaddr)
{
        int timeout = 1000;
        u32 reg = readl(ioaddr + XGMAC_OMR);
        writel(reg | XGMAC_OMR_FTF, ioaddr + XGMAC_OMR);

        while ((timeout-- > 0) && readl(ioaddr + XGMAC_OMR) & XGMAC_OMR_FTF)
                udelay(1);
}

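/*
 * Post-process a completed tx descriptor. Returns 0 for a clean
 * transmit, or -1 after bumping the relevant extra-stats counters; a tx
 * underflow additionally triggers a TX FIFO flush.
 */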
static int desc_get_tx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)
{
        struct xgmac_extra_stats *x = &priv->xstats;
        u32 status = le32_to_cpu(p->flags);

        if (!(status & TXDESC_ERROR_SUMMARY))
                return 0;

        netdev_dbg(priv->dev, "tx desc error = 0x%08x\n", status);
        if (status & TXDESC_JABBER_TIMEOUT)
                x->tx_jabber++;
        if (status & TXDESC_FRAME_FLUSHED)
                x->tx_frame_flushed++;
        if (status & TXDESC_UNDERFLOW_ERR)
                xgmac_dma_flush_tx_fifo(priv->base);
        if (status & TXDESC_IP_HEADER_ERR)
                x->tx_ip_header_error++;
        if (status & TXDESC_LOCAL_FAULT)
                x->tx_local_fault++;
        if (status & TXDESC_REMOTE_FAULT)
                x->tx_remote_fault++;
        if (status & TXDESC_PAYLOAD_CSUM_ERR)
                x->tx_payload_error++;

        return -1;
}

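/*
 * Validate a received descriptor. Returns -1 for frames that must be
 * dropped, otherwise the checksum state to assign to the skb:
 * CHECKSUM_UNNECESSARY, or CHECKSUM_NONE when the hardware did not (or
 * could not) verify the IP header and payload checksums.
 */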
static int desc_get_rx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)
{
        struct xgmac_extra_stats *x = &priv->xstats;
        int ret = CHECKSUM_UNNECESSARY;
        u32 status = le32_to_cpu(p->flags);
        u32 ext_status = le32_to_cpu(p->ext_status);

        if (status & RXDESC_DA_FILTER_FAIL) {
                netdev_dbg(priv->dev, "XGMAC RX : Dest Address filter fail\n");
                x->rx_da_filter_fail++;
                return -1;
        }

        /* All frames should fit into a single buffer */
        if (!(status & RXDESC_FIRST_SEG) || !(status & RXDESC_LAST_SEG))
                return -1;

        /* Check if packet has checksum already */
        if ((status & RXDESC_FRAME_TYPE) && (status & RXDESC_EXT_STATUS) &&
            !(ext_status & RXDESC_IP_PAYLOAD_MASK))
                ret = CHECKSUM_NONE;

        netdev_dbg(priv->dev, "rx status - frame type=%d, csum = %d, ext stat %08x\n",
                   (status & RXDESC_FRAME_TYPE) ? 1 : 0, ret, ext_status);

        if (!(status & RXDESC_ERROR_SUMMARY))
                return ret;

        /* Handle any errors */
        if (status & (RXDESC_DESCRIPTOR_ERR | RXDESC_OVERFLOW_ERR |
                      RXDESC_GIANT_FRAME | RXDESC_LENGTH_ERR | RXDESC_CRC_ERR))
                return -1;

        if (status & RXDESC_EXT_STATUS) {
                if (ext_status & RXDESC_IP_HEADER_ERR)
                        x->rx_ip_header_error++;
                if (ext_status & RXDESC_IP_PAYLOAD_ERR)
                        x->rx_payload_error++;
                netdev_dbg(priv->dev, "IP checksum error - stat %08x\n",
                           ext_status);
                return CHECKSUM_NONE;
        }

        return ret;
}

static inline void xgmac_mac_enable(void __iomem *ioaddr)
{
        u32 value = readl(ioaddr + XGMAC_CONTROL);
        value |= MAC_ENABLE_RX | MAC_ENABLE_TX;
        writel(value, ioaddr + XGMAC_CONTROL);

        value = readl(ioaddr + XGMAC_DMA_CONTROL);
        value |= DMA_CONTROL_ST | DMA_CONTROL_SR;
        writel(value, ioaddr + XGMAC_DMA_CONTROL);
}

static inline void xgmac_mac_disable(void __iomem *ioaddr)
{
        u32 value = readl(ioaddr + XGMAC_DMA_CONTROL);
        value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR);
        writel(value, ioaddr + XGMAC_DMA_CONTROL);

        value = readl(ioaddr + XGMAC_CONTROL);
        value &= ~(MAC_ENABLE_TX | MAC_ENABLE_RX);
        writel(value, ioaddr + XGMAC_CONTROL);
}

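/*
 * Program one perfect-filter address register pair. The high word
 * carries the two most-significant octets plus XGMAC_ADDR_AE for every
 * entry other than 0; a NULL addr clears the entry instead.
 */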
static void xgmac_set_mac_addr(void __iomem *ioaddr, unsigned char *addr,
                               int num)
{
        u32 data;

        if (addr) {
                data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0);
                writel(data, ioaddr + XGMAC_ADDR_HIGH(num));
                data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
                writel(data, ioaddr + XGMAC_ADDR_LOW(num));
        } else {
                writel(0, ioaddr + XGMAC_ADDR_HIGH(num));
                writel(0, ioaddr + XGMAC_ADDR_LOW(num));
        }
}

static void xgmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
                               int num)
{
        u32 hi_addr, lo_addr;

        /* Read the MAC address from the hardware */
        hi_addr = readl(ioaddr + XGMAC_ADDR_HIGH(num));
        lo_addr = readl(ioaddr + XGMAC_ADDR_LOW(num));

        /* Extract the MAC address from the high and low words */
        addr[0] = lo_addr & 0xff;
        addr[1] = (lo_addr >> 8) & 0xff;
        addr[2] = (lo_addr >> 16) & 0xff;
        addr[3] = (lo_addr >> 24) & 0xff;
        addr[4] = hi_addr & 0xff;
        addr[5] = (hi_addr >> 8) & 0xff;
}

static int xgmac_set_flow_ctrl(struct xgmac_priv *priv, int rx, int tx)
{
        u32 reg;
        unsigned int flow = 0;

        priv->rx_pause = rx;
        priv->tx_pause = tx;

        if (rx || tx) {
                if (rx)
                        flow |= XGMAC_FLOW_CTRL_RFE;
                if (tx)
                        flow |= XGMAC_FLOW_CTRL_TFE;

                flow |= XGMAC_FLOW_CTRL_PLT | XGMAC_FLOW_CTRL_UP;
                flow |= (PAUSE_TIME << XGMAC_FLOW_CTRL_PT_SHIFT);

                writel(flow, priv->base + XGMAC_FLOW_CTRL);

                reg = readl(priv->base + XGMAC_OMR);
                reg |= XGMAC_OMR_EFC;
                writel(reg, priv->base + XGMAC_OMR);
        } else {
                writel(0, priv->base + XGMAC_FLOW_CTRL);

                reg = readl(priv->base + XGMAC_OMR);
                reg &= ~XGMAC_OMR_EFC;
                writel(reg, priv->base + XGMAC_OMR);
        }

        return 0;
}

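/*
 * Top up the rx ring with freshly allocated and DMA-mapped buffers.
 * The loop keeps at least one descriptor free so the ring never fills
 * completely, and ownership is handed to the DMA engine only after the
 * buffer address has been written into the descriptor.
 */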
static void xgmac_rx_refill(struct xgmac_priv *priv)
{
        struct xgmac_dma_desc *p;
        dma_addr_t paddr;
        int bufsz = priv->dev->mtu + ETH_HLEN + ETH_FCS_LEN;

        while (dma_ring_space(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ) > 1) {
                int entry = priv->rx_head;
                struct sk_buff *skb;

                p = priv->dma_rx + entry;

                if (priv->rx_skbuff[entry] == NULL) {
                        skb = netdev_alloc_skb_ip_align(priv->dev, bufsz);
                        if (unlikely(skb == NULL))
                                break;

                        paddr = dma_map_single(priv->device, skb->data,
                                               priv->dma_buf_sz - NET_IP_ALIGN,
                                               DMA_FROM_DEVICE);
                        if (dma_mapping_error(priv->device, paddr)) {
                                dev_kfree_skb_any(skb);
                                break;
                        }
                        priv->rx_skbuff[entry] = skb;
                        desc_set_buf_addr(p, paddr, priv->dma_buf_sz);
                }

                netdev_dbg(priv->dev, "rx ring: head %d, tail %d\n",
                           priv->rx_head, priv->rx_tail);

                priv->rx_head = dma_ring_incr(priv->rx_head, DMA_RX_RING_SZ);
                desc_set_rx_owner(p);
        }
}

/**
 * xgmac_dma_desc_rings_init - init the RX/TX descriptor rings
 * @dev: net device structure
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) static int xgmac_dma_desc_rings_init(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) struct xgmac_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) unsigned int bfsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) /* Set the Buffer size according to the MTU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) * The total buffer size including any IP offset must be a multiple
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) * of 8 bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) netdev_dbg(priv->dev, "mtu [%d] bfsize [%d]\n", dev->mtu, bfsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) priv->rx_skbuff = kcalloc(DMA_RX_RING_SZ, sizeof(struct sk_buff *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) if (!priv->rx_skbuff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) priv->dma_rx = dma_alloc_coherent(priv->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) DMA_RX_RING_SZ *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) sizeof(struct xgmac_dma_desc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) &priv->dma_rx_phy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) if (!priv->dma_rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) goto err_dma_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) priv->tx_skbuff = kcalloc(DMA_TX_RING_SZ, sizeof(struct sk_buff *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) if (!priv->tx_skbuff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) goto err_tx_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) priv->dma_tx = dma_alloc_coherent(priv->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) DMA_TX_RING_SZ *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) sizeof(struct xgmac_dma_desc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) &priv->dma_tx_phy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) if (!priv->dma_tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) goto err_dma_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) netdev_dbg(priv->dev, "DMA desc rings: virt addr (Rx %p, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) "Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) priv->dma_rx, priv->dma_tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) (unsigned int)priv->dma_rx_phy, (unsigned int)priv->dma_tx_phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) priv->rx_tail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) priv->rx_head = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) priv->dma_buf_sz = bfsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) desc_init_rx_desc(priv->dma_rx, DMA_RX_RING_SZ, priv->dma_buf_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) xgmac_rx_refill(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) priv->tx_tail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) priv->tx_head = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) writel(priv->dma_tx_phy, priv->base + XGMAC_DMA_TX_BASE_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) writel(priv->dma_rx_phy, priv->base + XGMAC_DMA_RX_BASE_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) err_dma_tx:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) kfree(priv->tx_skbuff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) err_tx_skb:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) dma_free_coherent(priv->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) priv->dma_rx, priv->dma_rx_phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) err_dma_rx:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) kfree(priv->rx_skbuff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) static void xgmac_free_rx_skbufs(struct xgmac_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) struct xgmac_dma_desc *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) if (!priv->rx_skbuff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) for (i = 0; i < DMA_RX_RING_SZ; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) struct sk_buff *skb = priv->rx_skbuff[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) if (skb == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) p = priv->dma_rx + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) dma_unmap_single(priv->device, desc_get_buf_addr(p),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) priv->dma_buf_sz - NET_IP_ALIGN, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) priv->rx_skbuff[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) static void xgmac_free_tx_skbufs(struct xgmac_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) struct xgmac_dma_desc *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) if (!priv->tx_skbuff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) for (i = 0; i < DMA_TX_RING_SZ; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) if (priv->tx_skbuff[i] == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) p = priv->dma_tx + i;
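		/* A packet's first segment was mapped with
		 * dma_map_single() (linear skb data); later segments came
		 * from frags via skb_frag_dma_map(), so use the matching
		 * unmap. The shared skb is freed once, on the last
		 * segment. */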
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) if (desc_get_tx_fs(p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) dma_unmap_single(priv->device, desc_get_buf_addr(p),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) desc_get_buf_len(p), DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) dma_unmap_page(priv->device, desc_get_buf_addr(p),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) desc_get_buf_len(p), DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) if (desc_get_tx_ls(p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) dev_kfree_skb_any(priv->tx_skbuff[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) priv->tx_skbuff[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) static void xgmac_free_dma_desc_rings(struct xgmac_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) /* Release the DMA TX/RX socket buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) xgmac_free_rx_skbufs(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) xgmac_free_tx_skbufs(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) /* Free the consistent memory allocated for descriptor rings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) if (priv->dma_tx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) dma_free_coherent(priv->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) DMA_TX_RING_SZ * sizeof(struct xgmac_dma_desc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) priv->dma_tx, priv->dma_tx_phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) priv->dma_tx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) if (priv->dma_rx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) dma_free_coherent(priv->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) priv->dma_rx, priv->dma_rx_phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) priv->dma_rx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) kfree(priv->rx_skbuff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) priv->rx_skbuff = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) kfree(priv->tx_skbuff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) priv->tx_skbuff = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) /**
 * xgmac_tx_complete:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) * @priv: private driver structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) * Description: it reclaims resources after transmission completes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) static void xgmac_tx_complete(struct xgmac_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) unsigned int entry = priv->tx_tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) struct sk_buff *skb = priv->tx_skbuff[entry];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) struct xgmac_dma_desc *p = priv->dma_tx + entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) /* Check if the descriptor is owned by the DMA. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) if (desc_get_owner(p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) netdev_dbg(priv->dev, "tx ring: curr %d, dirty %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) priv->tx_head, priv->tx_tail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) if (desc_get_tx_fs(p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) dma_unmap_single(priv->device, desc_get_buf_addr(p),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) desc_get_buf_len(p), DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) dma_unmap_page(priv->device, desc_get_buf_addr(p),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) desc_get_buf_len(p), DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) /* Check tx error on the last segment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) if (desc_get_tx_ls(p)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) desc_get_tx_status(priv, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) dev_consume_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) priv->tx_skbuff[entry] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) /* Ensure tx_tail is visible to xgmac_xmit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) smp_mb();
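	/* Wake the queue only once a worst-case packet fits again
	 * (linear head plus up to MAX_SKB_FRAGS fragments); this
	 * mirrors the stop threshold in xgmac_xmit(). */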
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) if (unlikely(netif_queue_stopped(priv->dev) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) (tx_dma_ring_space(priv) > MAX_SKB_FRAGS)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) netif_wake_queue(priv->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) static void xgmac_tx_timeout_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) u32 reg, value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) struct xgmac_priv *priv =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) container_of(work, struct xgmac_priv, tx_timeout_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) napi_disable(&priv->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) writel(0, priv->base + XGMAC_DMA_INTR_ENA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) netif_tx_lock(priv->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) reg = readl(priv->base + XGMAC_DMA_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) writel(reg & ~DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);
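	/* Wait for the TX DMA engine to stop. The 0x700000 mask appears
	 * to cover the transmit process state field of DMA_STATUS
	 * (bits 22:20), with 0x600000 reading as the suspended state. */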
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) value = readl(priv->base + XGMAC_DMA_STATUS) & 0x700000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) } while (value && (value != 0x600000));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) xgmac_free_tx_skbufs(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) priv->tx_tail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) priv->tx_head = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) writel(priv->dma_tx_phy, priv->base + XGMAC_DMA_TX_BASE_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) writel(reg | DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) writel(DMA_STATUS_TU | DMA_STATUS_TPS | DMA_STATUS_NIS | DMA_STATUS_AIS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) priv->base + XGMAC_DMA_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) netif_tx_unlock(priv->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) netif_wake_queue(priv->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) napi_enable(&priv->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) /* Enable interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) static int xgmac_hw_init(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) u32 value, ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) int limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) struct xgmac_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) void __iomem *ioaddr = priv->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) /* Save the ctrl register value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) ctrl = readl(ioaddr + XGMAC_CONTROL) & XGMAC_CONTROL_SPD_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) /* SW reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) value = DMA_BUS_MODE_SFT_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) writel(value, ioaddr + XGMAC_DMA_BUS_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) limit = 15000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) while (limit-- &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) (readl(ioaddr + XGMAC_DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) if (limit < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
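	/* Programmed burst length of 16 for both TX (PBL) and RX
	 * (RPBL), with fixed bursts, address-aligned beats and the
	 * alternate (enhanced) descriptor size. */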
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) value = (0x10 << DMA_BUS_MODE_PBL_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) (0x10 << DMA_BUS_MODE_RPBL_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) DMA_BUS_MODE_FB | DMA_BUS_MODE_ATDS | DMA_BUS_MODE_AAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) writel(value, ioaddr + XGMAC_DMA_BUS_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) writel(0, ioaddr + XGMAC_DMA_INTR_ENA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) /* Mask power mgt interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) writel(XGMAC_INT_STAT_PMTIM, ioaddr + XGMAC_INT_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) /* XGMAC requires AXI bus init. This is a 'magic number' for now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) writel(0x0077000E, ioaddr + XGMAC_DMA_AXI_BUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) ctrl |= XGMAC_CONTROL_DDIC | XGMAC_CONTROL_JE | XGMAC_CONTROL_ACS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) XGMAC_CONTROL_CAR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) if (dev->features & NETIF_F_RXCSUM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) ctrl |= XGMAC_CONTROL_IPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) writel(ctrl, ioaddr + XGMAC_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) writel(DMA_CONTROL_OSF, ioaddr + XGMAC_DMA_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) /* Set the HW DMA mode and the COE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) writel(XGMAC_OMR_TSF | XGMAC_OMR_RFD | XGMAC_OMR_RFA |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) XGMAC_OMR_RTC_256,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) ioaddr + XGMAC_OMR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) /* Reset the MMC counters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) writel(1, ioaddr + XGMAC_MMC_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) * xgmac_open - open entry point of the driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) * @dev : pointer to the device structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) * Description:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) * This function is the open entry point of the driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) * 0 on success and an appropriate (-)ve integer as defined in errno.h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) * file on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) static int xgmac_open(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) struct xgmac_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) void __iomem *ioaddr = priv->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
	/* If the MAC address is not valid, fall back to a randomly
	 * generated one so the device can still come up. A specific
	 * address can be set with the following linux command:
	 * ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) if (!is_valid_ether_addr(dev->dev_addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) eth_hw_addr_random(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) netdev_dbg(priv->dev, "generated random MAC address %pM\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) dev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) memset(&priv->xstats, 0, sizeof(struct xgmac_extra_stats));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) /* Initialize the XGMAC and descriptors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) xgmac_hw_init(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) xgmac_set_flow_ctrl(priv, priv->rx_pause, priv->tx_pause);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) ret = xgmac_dma_desc_rings_init(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) /* Enable the MAC Rx/Tx */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) xgmac_mac_enable(ioaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) napi_enable(&priv->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) netif_start_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) /* Enable interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) /**
 * xgmac_stop - close entry point of the driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) * @dev : device pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) * Description:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) * This is the stop entry point of the driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) static int xgmac_stop(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) struct xgmac_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
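	/* NAPI is only active while the DMA interrupts are enabled;
	 * skip napi_disable() if they were already shut off elsewhere
	 * (presumably to avoid disabling NAPI twice). */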
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) if (readl(priv->base + XGMAC_DMA_INTR_ENA))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) napi_disable(&priv->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) writel(0, priv->base + XGMAC_DMA_INTR_ENA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) netif_tx_disable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) /* Disable the MAC core */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) xgmac_mac_disable(priv->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) /* Release and free the Rx/Tx resources */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) xgmac_free_dma_desc_rings(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) * xgmac_xmit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) * @skb : the socket buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) * @dev : device pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) * Description : Tx entry point of the driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) struct xgmac_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) unsigned int entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) u32 irq_flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) int nfrags = skb_shinfo(skb)->nr_frags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) struct xgmac_dma_desc *desc, *first;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) unsigned int desc_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) unsigned int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) dma_addr_t paddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
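	/* TX completion interrupt mitigation: request an interrupt on
	 * only one packet in every DMA_TX_RING_SZ/4; the rest are
	 * reclaimed when that interrupt eventually fires. */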
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) priv->tx_irq_cnt = (priv->tx_irq_cnt + 1) & (DMA_TX_RING_SZ/4 - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) irq_flag = priv->tx_irq_cnt ? 0 : TXDESC_INTERRUPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) desc_flags = (skb->ip_summed == CHECKSUM_PARTIAL) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) TXDESC_CSUM_ALL : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) entry = priv->tx_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) desc = priv->dma_tx + entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) first = desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) len = skb_headlen(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) if (dma_mapping_error(priv->device, paddr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) priv->tx_skbuff[entry] = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) desc_set_buf_addr_and_size(desc, paddr, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) for (i = 0; i < nfrags; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) len = skb_frag_size(frag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) paddr = skb_frag_dma_map(priv->device, frag, 0, len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) if (dma_mapping_error(priv->device, paddr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) goto dma_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) desc = priv->dma_tx + entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) priv->tx_skbuff[entry] = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) desc_set_buf_addr_and_size(desc, paddr, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) if (i < (nfrags - 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) desc_set_tx_owner(desc, desc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
	/* Interrupt on completion only for the last segment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) if (desc != first)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) desc_set_tx_owner(desc, desc_flags |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) TXDESC_LAST_SEG | irq_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) desc_flags |= TXDESC_LAST_SEG | irq_flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) /* Set owner on first desc last to avoid race condition */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) desc_set_tx_owner(first, desc_flags | TXDESC_FIRST_SEG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) writel(1, priv->base + XGMAC_DMA_TX_POLL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) /* Ensure tx_head update is visible to tx completion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) smp_mb();
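	/* Stop the queue once fewer slots remain than a worst-case
	 * packet could need (head + MAX_SKB_FRAGS fragments), then
	 * re-check after the barrier in case the completion path
	 * just freed space. */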
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) if (unlikely(tx_dma_ring_space(priv) <= MAX_SKB_FRAGS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) /* Ensure netif_stop_queue is visible to tx completion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) smp_mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) if (tx_dma_ring_space(priv) > MAX_SKB_FRAGS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) netif_start_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) dma_err:
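	/* Unwind the i fragment mappings set up before the failure,
	 * walking forward from the descriptor after tx_head, then
	 * release the head mapping and drop the skb. */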
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) entry = priv->tx_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) for ( ; i > 0; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) desc = priv->dma_tx + entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) priv->tx_skbuff[entry] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) dma_unmap_page(priv->device, desc_get_buf_addr(desc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) desc_get_buf_len(desc), DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) desc_clear_tx_owner(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) desc = first;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) dma_unmap_single(priv->device, desc_get_buf_addr(desc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) desc_get_buf_len(desc), DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) static int xgmac_rx(struct xgmac_priv *priv, int limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) unsigned int entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) unsigned int count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) struct xgmac_dma_desc *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) while (count < limit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) int ip_checksum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) int frame_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) if (!dma_ring_cnt(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) entry = priv->rx_tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) p = priv->dma_rx + entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) if (desc_get_owner(p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) priv->rx_tail = dma_ring_incr(priv->rx_tail, DMA_RX_RING_SZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) /* read the status of the incoming frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) ip_checksum = desc_get_rx_status(priv, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) if (ip_checksum < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) skb = priv->rx_skbuff[entry];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) if (unlikely(!skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) netdev_err(priv->dev, "Inconsistent Rx descriptor chain\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) priv->rx_skbuff[entry] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) frame_len = desc_get_rx_frame_len(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) netdev_dbg(priv->dev, "RX frame size %d, COE status: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) frame_len, ip_checksum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) skb_put(skb, frame_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) dma_unmap_single(priv->device, desc_get_buf_addr(p),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) priv->dma_buf_sz - NET_IP_ALIGN, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) skb->protocol = eth_type_trans(skb, priv->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) skb->ip_summed = ip_checksum;
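		/* Frames without a hardware-verified checksum are handed
		 * straight to the stack; everything else may be merged
		 * by GRO. */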
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) if (ip_checksum == CHECKSUM_NONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) netif_receive_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) napi_gro_receive(&priv->napi, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) xgmac_rx_refill(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) * xgmac_poll - xgmac poll method (NAPI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) * @napi : pointer to the napi structure.
 * @budget : maximum number of packets this poll call is allowed to
 * process.
 * Description :
 * This function implements the reception process.
 * It also runs the TX completion reclaim.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) static int xgmac_poll(struct napi_struct *napi, int budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) struct xgmac_priv *priv = container_of(napi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) struct xgmac_priv, napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) int work_done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) xgmac_tx_complete(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) work_done = xgmac_rx(priv, budget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
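	/* Finishing under budget means both rings are drained, so stop
	 * polling and restore the full DMA interrupt mask. */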
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) if (work_done < budget) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) napi_complete_done(napi, work_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) __raw_writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) return work_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) * xgmac_tx_timeout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) * @dev : Pointer to net device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) * @txqueue: index of the hung transmit queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) * Description: this function is called when a packet transmission fails to
 * complete within a reasonable time. The driver will mark the error in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) * netdev structure and arrange for the device to be reset to a sane state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) * in order to transmit a new packet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) static void xgmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) struct xgmac_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) schedule_work(&priv->tx_timeout_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) * xgmac_set_rx_mode - entry point for multicast addressing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) * @dev : pointer to the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) * Description:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) * This function is a driver entry point which gets called by the kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) * whenever multicast addresses must be enabled/disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) * void.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) static void xgmac_set_rx_mode(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) struct xgmac_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) void __iomem *ioaddr = priv->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) unsigned int value = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) u32 hash_filter[XGMAC_NUM_HASH];
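	/* Perfect-filter entry 0 holds the device's own address (set in
	 * xgmac_open()), so additional addresses start at index 1. */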
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) int reg = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) struct netdev_hw_addr *ha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) bool use_hash = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) netdev_dbg(priv->dev, "# mcasts %d, # unicast %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) netdev_mc_count(dev), netdev_uc_count(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) if (dev->flags & IFF_PROMISC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) value |= XGMAC_FRAME_FILTER_PR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) memset(hash_filter, 0, sizeof(hash_filter));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) if (netdev_uc_count(dev) > priv->max_macs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) use_hash = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) value |= XGMAC_FRAME_FILTER_HUC | XGMAC_FRAME_FILTER_HPF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) netdev_for_each_uc_addr(ha, dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) if (use_hash) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) /* The most significant 4 bits determine the register to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) * use (H/L) while the other 5 bits determine the bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) * within the register. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) xgmac_set_mac_addr(ioaddr, ha->addr, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) reg++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) if (dev->flags & IFF_ALLMULTI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) value |= XGMAC_FRAME_FILTER_PM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) if ((netdev_mc_count(dev) + reg - 1) > priv->max_macs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) use_hash = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) use_hash = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) netdev_for_each_mc_addr(ha, dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) if (use_hash) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) /* The most significant 4 bits determine the register to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) * use (H/L) while the other 5 bits determine the bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) * within the register. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) xgmac_set_mac_addr(ioaddr, ha->addr, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) reg++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) for (i = reg; i <= priv->max_macs; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) xgmac_set_mac_addr(ioaddr, NULL, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) for (i = 0; i < XGMAC_NUM_HASH; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) writel(hash_filter[i], ioaddr + XGMAC_HASH(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) writel(value, ioaddr + XGMAC_FRAME_FILTER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) * xgmac_change_mtu - entry point to change MTU size for the device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) * @dev : device pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) * @new_mtu : the new MTU size for the device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) * Description: the Maximum Transfer Unit (MTU) is used by the network layer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) * to drive packet transmission. Ethernet has an MTU of 1500 octets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) * (ETH_DATA_LEN). This value can be changed with ifconfig.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) * 0 on success and an appropriate (-)ve integer as defined in errno.h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) * file on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) static int xgmac_change_mtu(struct net_device *dev, int new_mtu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) {
	/* If the interface is down, just record the new MTU; it takes
	 * effect when the interface is next brought up. */
	if (!netif_running(dev)) {
		dev->mtu = new_mtu;
		return 0;
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) /* Bring interface down, change mtu and bring interface back up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) xgmac_stop(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) dev->mtu = new_mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) return xgmac_open(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) static irqreturn_t xgmac_pmt_interrupt(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) u32 intr_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) struct net_device *dev = (struct net_device *)dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) struct xgmac_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) void __iomem *ioaddr = priv->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) intr_status = __raw_readl(ioaddr + XGMAC_INT_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) if (intr_status & XGMAC_INT_STAT_PMT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) netdev_dbg(priv->dev, "received Magic frame\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) /* clear the PMT bits 5 and 6 by reading the PMT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) readl(ioaddr + XGMAC_PMT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) u32 intr_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) struct net_device *dev = (struct net_device *)dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) struct xgmac_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) struct xgmac_extra_stats *x = &priv->xstats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) /* read the status register (CSR5) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) intr_status = __raw_readl(priv->base + XGMAC_DMA_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) intr_status &= __raw_readl(priv->base + XGMAC_DMA_INTR_ENA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) __raw_writel(intr_status, priv->base + XGMAC_DMA_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) /* It displays the DMA process states (CSR5 register) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) /* ABNORMAL interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) if (unlikely(intr_status & DMA_STATUS_AIS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) if (intr_status & DMA_STATUS_TJT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) netdev_err(priv->dev, "transmit jabber\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) x->tx_jabber++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) if (intr_status & DMA_STATUS_RU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) x->rx_buf_unav++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) if (intr_status & DMA_STATUS_RPS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) netdev_err(priv->dev, "receive process stopped\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) x->rx_process_stopped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) if (intr_status & DMA_STATUS_ETI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) netdev_err(priv->dev, "transmit early interrupt\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) x->tx_early++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) if (intr_status & DMA_STATUS_TPS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) netdev_err(priv->dev, "transmit process stopped\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) x->tx_process_stopped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) schedule_work(&priv->tx_timeout_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) if (intr_status & DMA_STATUS_FBI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) netdev_err(priv->dev, "fatal bus error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) x->fatal_bus_error++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) /* TX/RX NORMAL interrupts */
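	/* Leave only the abnormal-event interrupts enabled while NAPI
	 * drains the rings; xgmac_poll() restores the full mask. */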
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) if (intr_status & (DMA_STATUS_RI | DMA_STATUS_TU | DMA_STATUS_TI)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) __raw_writel(DMA_INTR_ABNORMAL, priv->base + XGMAC_DMA_INTR_ENA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) napi_schedule(&priv->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) #ifdef CONFIG_NET_POLL_CONTROLLER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) /* Polling receive - used by NETCONSOLE and other diagnostic tools
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) * to allow network I/O with interrupts disabled. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) static void xgmac_poll_controller(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) disable_irq(dev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) xgmac_interrupt(dev->irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) enable_irq(dev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) xgmac_get_stats64(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) struct rtnl_link_stats64 *storage)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) struct xgmac_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) void __iomem *base = priv->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) u32 count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448)
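	/* Freeze the MMC counters while reading so the split 32-bit
	 * accesses (e.g. the two halves of the 64-bit octet counters)
	 * form a consistent snapshot; unfreeze when done. */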
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) spin_lock_bh(&priv->stats_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) writel(XGMAC_MMC_CTRL_CNT_FRZ, base + XGMAC_MMC_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) storage->rx_bytes = readl(base + XGMAC_MMC_RXOCTET_G_LO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) storage->rx_bytes |= (u64)(readl(base + XGMAC_MMC_RXOCTET_G_HI)) << 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) storage->rx_packets = readl(base + XGMAC_MMC_RXFRAME_GB_LO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) storage->multicast = readl(base + XGMAC_MMC_RXMCFRAME_G);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) storage->rx_crc_errors = readl(base + XGMAC_MMC_RXCRCERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) storage->rx_length_errors = readl(base + XGMAC_MMC_RXLENGTHERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) storage->rx_missed_errors = readl(base + XGMAC_MMC_RXOVERFLOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) storage->tx_bytes = readl(base + XGMAC_MMC_TXOCTET_G_LO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) storage->tx_bytes |= (u64)(readl(base + XGMAC_MMC_TXOCTET_G_HI)) << 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) count = readl(base + XGMAC_MMC_TXFRAME_GB_LO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) storage->tx_errors = count - readl(base + XGMAC_MMC_TXFRAME_G_LO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) storage->tx_packets = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) storage->tx_fifo_errors = readl(base + XGMAC_MMC_TXUNDERFLOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) writel(0, base + XGMAC_MMC_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) spin_unlock_bh(&priv->stats_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) static int xgmac_set_mac_address(struct net_device *dev, void *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) struct xgmac_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) void __iomem *ioaddr = priv->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) struct sockaddr *addr = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) if (!is_valid_ether_addr(addr->sa_data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) return -EADDRNOTAVAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) static int xgmac_set_features(struct net_device *dev, netdev_features_t features)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) u32 ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) struct xgmac_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) void __iomem *ioaddr = priv->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) netdev_features_t changed = dev->features ^ features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) if (!(changed & NETIF_F_RXCSUM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) ctrl = readl(ioaddr + XGMAC_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) if (features & NETIF_F_RXCSUM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) ctrl |= XGMAC_CONTROL_IPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) ctrl &= ~XGMAC_CONTROL_IPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) writel(ctrl, ioaddr + XGMAC_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) static const struct net_device_ops xgmac_netdev_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) .ndo_open = xgmac_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) .ndo_start_xmit = xgmac_xmit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) .ndo_stop = xgmac_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) .ndo_change_mtu = xgmac_change_mtu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) .ndo_set_rx_mode = xgmac_set_rx_mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) .ndo_tx_timeout = xgmac_tx_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) .ndo_get_stats64 = xgmac_get_stats64,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) #ifdef CONFIG_NET_POLL_CONTROLLER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) .ndo_poll_controller = xgmac_poll_controller,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) .ndo_set_mac_address = xgmac_set_mac_address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) .ndo_set_features = xgmac_set_features,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523)
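/* ethtool support.  There is no PHY or MDIO management in this driver,
 * so the link is reported as a fixed 10 Gb/s full-duplex connection with
 * autonegotiation disabled and no supported/advertised link modes.
 */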
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) static int xgmac_ethtool_get_link_ksettings(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) struct ethtool_link_ksettings *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) {
	cmd->base.autoneg = AUTONEG_DISABLE;
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.speed = SPEED_10000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) static void xgmac_get_pauseparam(struct net_device *netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) struct ethtool_pauseparam *pause)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) struct xgmac_priv *priv = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) pause->rx_pause = priv->rx_pause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) pause->tx_pause = priv->tx_pause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) static int xgmac_set_pauseparam(struct net_device *netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) struct ethtool_pauseparam *pause)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) struct xgmac_priv *priv = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) if (pause->autoneg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) return xgmac_set_flow_ctrl(priv, pause->rx_pause, pause->tx_pause);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) struct xgmac_stats {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) char stat_string[ETH_GSTRING_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) int stat_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) bool is_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) #define XGMAC_STAT(m) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) { #m, offsetof(struct xgmac_priv, xstats.m), false }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) #define XGMAC_HW_STAT(m, reg_offset) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) { #m, reg_offset, true }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
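/* The table below mixes two kinds of entries: XGMAC_STAT() entries are
 * software counters read from struct xgmac_priv at stat_offset, while
 * XGMAC_HW_STAT() entries are hardware MMC counters read from the MMC
 * register at stat_offset; is_reg tells xgmac_get_ethtool_stats() which
 * access method to use.
 */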
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) static const struct xgmac_stats xgmac_gstrings_stats[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) XGMAC_STAT(tx_frame_flushed),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) XGMAC_STAT(tx_payload_error),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) XGMAC_STAT(tx_ip_header_error),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) XGMAC_STAT(tx_local_fault),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) XGMAC_STAT(tx_remote_fault),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) XGMAC_STAT(tx_early),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) XGMAC_STAT(tx_process_stopped),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) XGMAC_STAT(tx_jabber),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) XGMAC_STAT(rx_buf_unav),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) XGMAC_STAT(rx_process_stopped),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) XGMAC_STAT(rx_payload_error),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) XGMAC_STAT(rx_ip_header_error),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) XGMAC_STAT(rx_da_filter_fail),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) XGMAC_STAT(fatal_bus_error),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) XGMAC_HW_STAT(rx_watchdog, XGMAC_MMC_RXWATCHDOG),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) XGMAC_HW_STAT(tx_vlan, XGMAC_MMC_TXVLANFRAME),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) XGMAC_HW_STAT(rx_vlan, XGMAC_MMC_RXVLANFRAME),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) XGMAC_HW_STAT(tx_pause, XGMAC_MMC_TXPAUSEFRAME),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) XGMAC_HW_STAT(rx_pause, XGMAC_MMC_RXPAUSEFRAME),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) #define XGMAC_STATS_LEN ARRAY_SIZE(xgmac_gstrings_stats)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) static void xgmac_get_ethtool_stats(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) struct ethtool_stats *dummy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) u64 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) struct xgmac_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) void *p = priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) for (i = 0; i < XGMAC_STATS_LEN; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) if (xgmac_gstrings_stats[i].is_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) *data++ = readl(priv->base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) xgmac_gstrings_stats[i].stat_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) *data++ = *(u32 *)(p +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) xgmac_gstrings_stats[i].stat_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) static int xgmac_get_sset_count(struct net_device *netdev, int sset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) switch (sset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) case ETH_SS_STATS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) return XGMAC_STATS_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) static void xgmac_get_strings(struct net_device *dev, u32 stringset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) u8 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) u8 *p = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) switch (stringset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) case ETH_SS_STATS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) for (i = 0; i < XGMAC_STATS_LEN; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) memcpy(p, xgmac_gstrings_stats[i].stat_string,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) ETH_GSTRING_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) p += ETH_GSTRING_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) static void xgmac_get_wol(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) struct ethtool_wolinfo *wol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) struct xgmac_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) if (device_can_wakeup(priv->device)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) wol->supported = WAKE_MAGIC | WAKE_UCAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) wol->wolopts = priv->wolopts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) static int xgmac_set_wol(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) struct ethtool_wolinfo *wol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) struct xgmac_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) u32 support = WAKE_MAGIC | WAKE_UCAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) if (!device_can_wakeup(priv->device))
		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) if (wol->wolopts & ~support)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) priv->wolopts = wol->wolopts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) if (wol->wolopts) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) device_set_wakeup_enable(priv->device, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) enable_irq_wake(dev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) device_set_wakeup_enable(priv->device, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) disable_irq_wake(dev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) static const struct ethtool_ops xgmac_ethtool_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) .get_link = ethtool_op_get_link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) .get_pauseparam = xgmac_get_pauseparam,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) .set_pauseparam = xgmac_set_pauseparam,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) .get_ethtool_stats = xgmac_get_ethtool_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) .get_strings = xgmac_get_strings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) .get_wol = xgmac_get_wol,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) .set_wol = xgmac_set_wol,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) .get_sset_count = xgmac_get_sset_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) .get_link_ksettings = xgmac_ethtool_get_link_ksettings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) };
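
/* Illustrative userspace usage of the ops above (standard ethtool
 * commands; the interface name eth0 is only an example):
 *
 *	ethtool -S eth0			dump the statistics declared above
 *	ethtool -A eth0 rx on tx on	set the pause parameters
 *	ethtool -s eth0 wol g		arm magic-packet wake-up
 */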
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) /**
 * xgmac_probe - probe and initialize an XGMAC device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) * @pdev: platform device pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) * Description: the driver is initialized through platform_device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) static int xgmac_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) struct net_device *ndev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) struct xgmac_priv *priv = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) u32 uid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) if (!res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) if (!request_mem_region(res->start, resource_size(res), pdev->name))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) ndev = alloc_etherdev(sizeof(struct xgmac_priv));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) if (!ndev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) goto err_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) SET_NETDEV_DEV(ndev, &pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) platform_set_drvdata(pdev, ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) ndev->netdev_ops = &xgmac_netdev_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) ndev->ethtool_ops = &xgmac_ethtool_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) spin_lock_init(&priv->stats_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) INIT_WORK(&priv->tx_timeout_work, xgmac_tx_timeout_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) priv->device = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) priv->dev = ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) priv->rx_pause = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) priv->tx_pause = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) priv->base = ioremap(res->start, resource_size(res));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) if (!priv->base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) netdev_err(ndev, "ioremap failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) goto err_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) uid = readl(priv->base + XGMAC_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) netdev_info(ndev, "h/w version is 0x%x\n", uid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733)
	/* Figure out how many valid mac address filter registers we have.
	 * Address filter slots above 7 only exist on some versions of the
	 * hardware, so probe by writing to the high register of slot 31:
	 * if the value reads back, 31 slots are usable, otherwise only 7.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) writel(1, priv->base + XGMAC_ADDR_HIGH(31));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) if (readl(priv->base + XGMAC_ADDR_HIGH(31)) == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) priv->max_macs = 31;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) priv->max_macs = 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740)
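	/* Mask all DMA interrupts until the interrupt handler is installed */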
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) writel(0, priv->base + XGMAC_DMA_INTR_ENA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) ndev->irq = platform_get_irq(pdev, 0);
	if (ndev->irq < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) netdev_err(ndev, "No irq resource\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) ret = ndev->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) goto err_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) ret = request_irq(ndev->irq, xgmac_interrupt, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) dev_name(&pdev->dev), ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) if (ret < 0) {
		netdev_err(ndev, "Could not request irq %d - ret %d\n",
			   ndev->irq, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) goto err_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) priv->pmt_irq = platform_get_irq(pdev, 1);
	if (priv->pmt_irq < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) netdev_err(ndev, "No pmt irq resource\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) ret = priv->pmt_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) goto err_pmt_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) ret = request_irq(priv->pmt_irq, xgmac_pmt_interrupt, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) dev_name(&pdev->dev), ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) if (ret < 0) {
		netdev_err(ndev, "Could not request irq %d - ret %d\n",
			   priv->pmt_irq, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) goto err_pmt_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) device_set_wakeup_capable(&pdev->dev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) if (device_can_wakeup(priv->device))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) ndev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) if (readl(priv->base + XGMAC_DMA_HW_FEATURE) & DMA_HW_FEAT_TXCOESEL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) NETIF_F_RXCSUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) ndev->features |= ndev->hw_features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) ndev->priv_flags |= IFF_UNICAST_FLT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) /* MTU range: 46 - 9000 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) ndev->max_mtu = XGMAC_MAX_MTU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) /* Get the MAC address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) xgmac_get_mac_addr(priv->base, ndev->dev_addr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) if (!is_valid_ether_addr(ndev->dev_addr))
		netdev_warn(ndev, "MAC address %pM not valid\n",
			    ndev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) netif_napi_add(ndev, &priv->napi, xgmac_poll, 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) ret = register_netdev(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) goto err_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) err_reg:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) netif_napi_del(&priv->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) free_irq(priv->pmt_irq, ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) err_pmt_irq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) free_irq(ndev->irq, ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) err_irq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) iounmap(priv->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) err_io:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) free_netdev(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) err_alloc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) release_mem_region(res->start, resource_size(res));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) /**
 * xgmac_remove - remove an XGMAC device
 * @pdev: platform device pointer
 * Description: this function disables the MAC RX/TX, frees the IRQ lines,
 * unregisters the network device, and unmaps and releases the device
 * I/O memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) static int xgmac_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) struct net_device *ndev = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) struct xgmac_priv *priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) xgmac_mac_disable(priv->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) /* Free the IRQ lines */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) free_irq(ndev->irq, ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) free_irq(priv->pmt_irq, ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) unregister_netdev(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) netif_napi_del(&priv->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) iounmap(priv->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) release_mem_region(res->start, resource_size(res));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) free_netdev(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) #ifdef CONFIG_PM_SLEEP
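/* Program the PMT register: select the requested wake-up triggers and put
 * the MAC into power-down so that it waits for a matching wake-up frame.
 */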
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) static void xgmac_pmt(void __iomem *ioaddr, unsigned long mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) unsigned int pmt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) if (mode & WAKE_MAGIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) if (mode & WAKE_UCAST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_GLBL_UNICAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) writel(pmt, ioaddr + XGMAC_PMT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) static int xgmac_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) struct net_device *ndev = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) struct xgmac_priv *priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) u32 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) if (!ndev || !netif_running(ndev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) netif_device_detach(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) napi_disable(&priv->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) writel(0, priv->base + XGMAC_DMA_INTR_ENA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) if (device_may_wakeup(priv->device)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) /* Stop TX/RX DMA Only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) value = readl(priv->base + XGMAC_DMA_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) writel(value, priv->base + XGMAC_DMA_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) xgmac_pmt(priv->base, priv->wolopts);
	} else {
		xgmac_mac_disable(priv->base);
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) static int xgmac_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) struct net_device *ndev = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) struct xgmac_priv *priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) void __iomem *ioaddr = priv->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) if (!netif_running(ndev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) xgmac_pmt(ioaddr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894)
	/* Enable the MAC, clear any pending DMA status and re-enable
	 * the DMA interrupts.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) xgmac_mac_enable(ioaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) netif_device_attach(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) napi_enable(&priv->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) #endif /* CONFIG_PM_SLEEP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) static SIMPLE_DEV_PM_OPS(xgmac_pm_ops, xgmac_suspend, xgmac_resume);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) static const struct of_device_id xgmac_of_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) { .compatible = "calxeda,hb-xgmac", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) {},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) MODULE_DEVICE_TABLE(of, xgmac_of_match);
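
/* A sketch of the matching device tree node (the register address and
 * interrupt specifiers are board specific; this example follows the
 * Calxeda Highbank layout):
 *
 *	ethernet@fff50000 {
 *		compatible = "calxeda,hb-xgmac";
 *		reg = <0xfff50000 0x1000>;
 *		interrupts = <0 77 4>, <0 78 4>, <0 79 4>;
 *	};
 */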
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) static struct platform_driver xgmac_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) .name = "calxedaxgmac",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) .of_match_table = xgmac_of_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) .pm = &xgmac_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) .probe = xgmac_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) .remove = xgmac_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) module_platform_driver(xgmac_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) MODULE_AUTHOR("Calxeda, Inc.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) MODULE_DESCRIPTION("Calxeda 10G XGMAC driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) MODULE_LICENSE("GPL v2");