Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

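drivers/net/ethernet/socionext/netsec.c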
// SPDX-License-Identifier: GPL-2.0+

#include <linux/types.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/acpi.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/netlink.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>

#include <net/tcp.h>
#include <net/page_pool.h>
#include <net/ip6_checksum.h>

#define NETSEC_REG_SOFT_RST			0x104
#define NETSEC_REG_COM_INIT			0x120

#define NETSEC_REG_TOP_STATUS			0x200
#define NETSEC_IRQ_RX				BIT(1)
#define NETSEC_IRQ_TX				BIT(0)

#define NETSEC_REG_TOP_INTEN			0x204
#define NETSEC_REG_INTEN_SET			0x234
#define NETSEC_REG_INTEN_CLR			0x238

#define NETSEC_REG_NRM_TX_STATUS		0x400
#define NETSEC_REG_NRM_TX_INTEN			0x404
#define NETSEC_REG_NRM_TX_INTEN_SET		0x428
#define NETSEC_REG_NRM_TX_INTEN_CLR		0x42c
#define NRM_TX_ST_NTOWNR	BIT(17)
#define NRM_TX_ST_TR_ERR	BIT(16)
#define NRM_TX_ST_TXDONE	BIT(15)
#define NRM_TX_ST_TMREXP	BIT(14)

#define NETSEC_REG_NRM_RX_STATUS		0x440
#define NETSEC_REG_NRM_RX_INTEN			0x444
#define NETSEC_REG_NRM_RX_INTEN_SET		0x468
#define NETSEC_REG_NRM_RX_INTEN_CLR		0x46c
#define NRM_RX_ST_RC_ERR	BIT(16)
#define NRM_RX_ST_PKTCNT	BIT(15)
#define NRM_RX_ST_TMREXP	BIT(14)

#define NETSEC_REG_PKT_CMD_BUF			0xd0

#define NETSEC_REG_CLK_EN			0x100

#define NETSEC_REG_PKT_CTRL			0x140

#define NETSEC_REG_DMA_TMR_CTRL			0x20c
#define NETSEC_REG_F_TAIKI_MC_VER		0x22c
#define NETSEC_REG_F_TAIKI_VER			0x230
#define NETSEC_REG_DMA_HM_CTRL			0x214
#define NETSEC_REG_DMA_MH_CTRL			0x220
#define NETSEC_REG_ADDR_DIS_CORE		0x218
#define NETSEC_REG_DMAC_HM_CMD_BUF		0x210
#define NETSEC_REG_DMAC_MH_CMD_BUF		0x21c

#define NETSEC_REG_NRM_TX_PKTCNT		0x410

#define NETSEC_REG_NRM_TX_DONE_PKTCNT		0x414
#define NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT	0x418

#define NETSEC_REG_NRM_TX_TMR			0x41c

#define NETSEC_REG_NRM_RX_PKTCNT		0x454
#define NETSEC_REG_NRM_RX_RXINT_PKTCNT		0x458
#define NETSEC_REG_NRM_TX_TXINT_TMR		0x420
#define NETSEC_REG_NRM_RX_RXINT_TMR		0x460

#define NETSEC_REG_NRM_RX_TMR			0x45c

#define NETSEC_REG_NRM_TX_DESC_START_UP		0x434
#define NETSEC_REG_NRM_TX_DESC_START_LW		0x408
#define NETSEC_REG_NRM_RX_DESC_START_UP		0x474
#define NETSEC_REG_NRM_RX_DESC_START_LW		0x448

#define NETSEC_REG_NRM_TX_CONFIG		0x430
#define NETSEC_REG_NRM_RX_CONFIG		0x470

#define MAC_REG_STATUS				0x1024
#define MAC_REG_DATA				0x11c0
#define MAC_REG_CMD				0x11c4
#define MAC_REG_FLOW_TH				0x11cc
#define MAC_REG_INTF_SEL			0x11d4
#define MAC_REG_DESC_INIT			0x11fc
#define MAC_REG_DESC_SOFT_RST			0x1204
#define NETSEC_REG_MODE_TRANS_COMP_STATUS	0x500

#define GMAC_REG_MCR				0x0000
#define GMAC_REG_MFFR				0x0004
#define GMAC_REG_GAR				0x0010
#define GMAC_REG_GDR				0x0014
#define GMAC_REG_FCR				0x0018
#define GMAC_REG_BMR				0x1000
#define GMAC_REG_RDLAR				0x100c
#define GMAC_REG_TDLAR				0x1010
#define GMAC_REG_OMR				0x1018

#define MHZ(n)		((n) * 1000 * 1000)

#define NETSEC_TX_SHIFT_OWN_FIELD		31
#define NETSEC_TX_SHIFT_LD_FIELD		30
#define NETSEC_TX_SHIFT_DRID_FIELD		24
#define NETSEC_TX_SHIFT_PT_FIELD		21
#define NETSEC_TX_SHIFT_TDRID_FIELD		16
#define NETSEC_TX_SHIFT_CC_FIELD		15
#define NETSEC_TX_SHIFT_FS_FIELD		9
#define NETSEC_TX_LAST				8
#define NETSEC_TX_SHIFT_CO			7
#define NETSEC_TX_SHIFT_SO			6
#define NETSEC_TX_SHIFT_TRS_FIELD		4

#define NETSEC_RX_PKT_OWN_FIELD			31
#define NETSEC_RX_PKT_LD_FIELD			30
#define NETSEC_RX_PKT_SDRID_FIELD		24
#define NETSEC_RX_PKT_FR_FIELD			23
#define NETSEC_RX_PKT_ER_FIELD			21
#define NETSEC_RX_PKT_ERR_FIELD			16
#define NETSEC_RX_PKT_TDRID_FIELD		12
#define NETSEC_RX_PKT_FS_FIELD			9
#define NETSEC_RX_PKT_LS_FIELD			8
#define NETSEC_RX_PKT_CO_FIELD			6

#define NETSEC_RX_PKT_ERR_MASK			3

#define NETSEC_MAX_TX_PKT_LEN			1518
#define NETSEC_MAX_TX_JUMBO_PKT_LEN		9018

#define NETSEC_RING_GMAC			15
#define NETSEC_RING_MAX				2

#define NETSEC_TCP_SEG_LEN_MAX			1460
#define NETSEC_TCP_JUMBO_SEG_LEN_MAX		8960

#define NETSEC_RX_CKSUM_NOTAVAIL		0
#define NETSEC_RX_CKSUM_OK			1
#define NETSEC_RX_CKSUM_NG			2

#define NETSEC_TOP_IRQ_REG_CODE_LOAD_END	BIT(20)
#define NETSEC_IRQ_TRANSITION_COMPLETE		BIT(4)

#define NETSEC_MODE_TRANS_COMP_IRQ_N2T		BIT(20)
#define NETSEC_MODE_TRANS_COMP_IRQ_T2N		BIT(19)

#define NETSEC_INT_PKTCNT_MAX			2047

#define NETSEC_FLOW_START_TH_MAX		95
#define NETSEC_FLOW_STOP_TH_MAX			95
#define NETSEC_FLOW_PAUSE_TIME_MIN		5

#define NETSEC_CLK_EN_REG_DOM_ALL		0x3f

#define NETSEC_PKT_CTRL_REG_MODE_NRM		BIT(28)
#define NETSEC_PKT_CTRL_REG_EN_JUMBO		BIT(27)
#define NETSEC_PKT_CTRL_REG_LOG_CHKSUM_ER	BIT(3)
#define NETSEC_PKT_CTRL_REG_LOG_HD_INCOMPLETE	BIT(2)
#define NETSEC_PKT_CTRL_REG_LOG_HD_ER		BIT(1)
#define NETSEC_PKT_CTRL_REG_DRP_NO_MATCH	BIT(0)

#define NETSEC_CLK_EN_REG_DOM_G			BIT(5)
#define NETSEC_CLK_EN_REG_DOM_C			BIT(1)
#define NETSEC_CLK_EN_REG_DOM_D			BIT(0)

#define NETSEC_COM_INIT_REG_DB			BIT(2)
#define NETSEC_COM_INIT_REG_CLS			BIT(1)
#define NETSEC_COM_INIT_REG_ALL			(NETSEC_COM_INIT_REG_CLS | \
						 NETSEC_COM_INIT_REG_DB)

#define NETSEC_SOFT_RST_REG_RESET		0
#define NETSEC_SOFT_RST_REG_RUN			BIT(31)

#define NETSEC_DMA_CTRL_REG_STOP		1
#define MH_CTRL__MODE_TRANS			BIT(20)

#define NETSEC_GMAC_CMD_ST_READ			0
#define NETSEC_GMAC_CMD_ST_WRITE		BIT(28)
#define NETSEC_GMAC_CMD_ST_BUSY			BIT(31)

#define NETSEC_GMAC_BMR_REG_COMMON		0x00412080
#define NETSEC_GMAC_BMR_REG_RESET		0x00020181
#define NETSEC_GMAC_BMR_REG_SWR			0x00000001

#define NETSEC_GMAC_OMR_REG_ST			BIT(13)
#define NETSEC_GMAC_OMR_REG_SR			BIT(1)

#define NETSEC_GMAC_MCR_REG_IBN			BIT(30)
#define NETSEC_GMAC_MCR_REG_CST			BIT(25)
#define NETSEC_GMAC_MCR_REG_JE			BIT(20)
#define NETSEC_MCR_PS				BIT(15)
#define NETSEC_GMAC_MCR_REG_FES			BIT(14)
#define NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON	0x0000280c
#define NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON	0x0001a00c

#define NETSEC_FCR_RFE				BIT(2)
#define NETSEC_FCR_TFE				BIT(1)

#define NETSEC_GMAC_GAR_REG_GW			BIT(1)
#define NETSEC_GMAC_GAR_REG_GB			BIT(0)

#define NETSEC_GMAC_GAR_REG_SHIFT_PA		11
#define NETSEC_GMAC_GAR_REG_SHIFT_GR		6
#define GMAC_REG_SHIFT_CR_GAR			2

#define NETSEC_GMAC_GAR_REG_CR_25_35_MHZ	2
#define NETSEC_GMAC_GAR_REG_CR_35_60_MHZ	3
#define NETSEC_GMAC_GAR_REG_CR_60_100_MHZ	0
#define NETSEC_GMAC_GAR_REG_CR_100_150_MHZ	1
#define NETSEC_GMAC_GAR_REG_CR_150_250_MHZ	4
#define NETSEC_GMAC_GAR_REG_CR_250_300_MHZ	5

#define NETSEC_GMAC_RDLAR_REG_COMMON		0x18000
#define NETSEC_GMAC_TDLAR_REG_COMMON		0x1c000

#define NETSEC_REG_NETSEC_VER_F_TAIKI		0x50000

#define NETSEC_REG_DESC_RING_CONFIG_CFG_UP	BIT(31)
#define NETSEC_REG_DESC_RING_CONFIG_CH_RST	BIT(30)
#define NETSEC_REG_DESC_TMR_MODE		4
#define NETSEC_REG_DESC_ENDIAN			0

#define NETSEC_MAC_DESC_SOFT_RST_SOFT_RST	1
#define NETSEC_MAC_DESC_INIT_REG_INIT		1

#define NETSEC_EEPROM_MAC_ADDRESS		0x00
#define NETSEC_EEPROM_HM_ME_ADDRESS_H		0x08
#define NETSEC_EEPROM_HM_ME_ADDRESS_L		0x0C
#define NETSEC_EEPROM_HM_ME_SIZE		0x10
#define NETSEC_EEPROM_MH_ME_ADDRESS_H		0x14
#define NETSEC_EEPROM_MH_ME_ADDRESS_L		0x18
#define NETSEC_EEPROM_MH_ME_SIZE		0x1C
#define NETSEC_EEPROM_PKT_ME_ADDRESS		0x20
#define NETSEC_EEPROM_PKT_ME_SIZE		0x24

#define DESC_NUM	256

#define NETSEC_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#define NETSEC_RXBUF_HEADROOM (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + \
			       NET_IP_ALIGN)
#define NETSEC_RX_BUF_NON_DATA (NETSEC_RXBUF_HEADROOM + \
				SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define NETSEC_RX_BUF_SIZE	(PAGE_SIZE - NETSEC_RX_BUF_NON_DATA)

#define DESC_SZ	sizeof(struct netsec_de)

#define NETSEC_F_NETSEC_VER_MAJOR_NUM(x)	((x) & 0xffff0000)

#define NETSEC_XDP_PASS          0
#define NETSEC_XDP_CONSUMED      BIT(0)
#define NETSEC_XDP_TX            BIT(1)
#define NETSEC_XDP_REDIR         BIT(2)

enum ring_id {
	NETSEC_RING_TX = 0,
	NETSEC_RING_RX
};

enum buf_type {
	TYPE_NETSEC_SKB = 0,
	TYPE_NETSEC_XDP_TX,
	TYPE_NETSEC_XDP_NDO,
};

struct netsec_desc {
	union {
		struct sk_buff *skb;
		struct xdp_frame *xdpf;
	};
	dma_addr_t dma_addr;
	void *addr;
	u16 len;
	u8 buf_type;
};

struct netsec_desc_ring {
	dma_addr_t desc_dma;
	struct netsec_desc *desc;
	void *vaddr;
	u16 head, tail;
	u16 xdp_xmit; /* netsec_xdp_xmit packets */
	struct page_pool *page_pool;
	struct xdp_rxq_info xdp_rxq;
	spinlock_t lock; /* XDP tx queue locking */
};

struct netsec_priv {
	struct netsec_desc_ring desc_ring[NETSEC_RING_MAX];
	struct ethtool_coalesce et_coalesce;
	struct bpf_prog *xdp_prog;
	spinlock_t reglock; /* protect reg access */
	struct napi_struct napi;
	phy_interface_t phy_interface;
	struct net_device *ndev;
	struct device_node *phy_np;
	struct phy_device *phydev;
	struct mii_bus *mii_bus;
	void __iomem *ioaddr;
	void __iomem *eeprom_base;
	struct device *dev;
	struct clk *clk;
	u32 msg_enable;
	u32 freq;
	u32 phy_addr;
	bool rx_cksum_offload_flag;
};

struct netsec_de { /* Netsec Descriptor layout */
	u32 attr;
	u32 data_buf_addr_up;
	u32 data_buf_addr_lw;
	u32 buf_len_info;
};

struct netsec_tx_pkt_ctrl {
	u16 tcp_seg_len;
	bool tcp_seg_offload_flag;
	bool cksum_offload_flag;
};

struct netsec_rx_pkt_info {
	int rx_cksum_result;
	int err_code;
	bool err_flag;
};

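/* Simple MMIO accessors for the NETSEC register window */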
static void netsec_write(struct netsec_priv *priv, u32 reg_addr, u32 val)
{
	writel(val, priv->ioaddr + reg_addr);
}

static u32 netsec_read(struct netsec_priv *priv, u32 reg_addr)
{
	return readl(priv->ioaddr + reg_addr);
}

/************* MDIO BUS OPS FOLLOW *************/

#define TIMEOUT_SPINS_MAC		1000
#define TIMEOUT_SECONDARY_MS_MAC	100

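/* Map the bus clock rate to the GAR CR field value selecting the
 * matching MDC clock divider range.
 */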
static u32 netsec_clk_type(u32 freq)
{
	if (freq < MHZ(35))
		return NETSEC_GMAC_GAR_REG_CR_25_35_MHZ;
	if (freq < MHZ(60))
		return NETSEC_GMAC_GAR_REG_CR_35_60_MHZ;
	if (freq < MHZ(100))
		return NETSEC_GMAC_GAR_REG_CR_60_100_MHZ;
	if (freq < MHZ(150))
		return NETSEC_GMAC_GAR_REG_CR_100_150_MHZ;
	if (freq < MHZ(250))
		return NETSEC_GMAC_GAR_REG_CR_150_250_MHZ;

	return NETSEC_GMAC_GAR_REG_CR_250_300_MHZ;
}

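/* Poll until the bits in @mask clear: first busy-spin for up to
 * TIMEOUT_SPINS_MAC reads, then sleep-poll for up to
 * TIMEOUT_SECONDARY_MS_MAC milliseconds before giving up.
 */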
static int netsec_wait_while_busy(struct netsec_priv *priv, u32 addr, u32 mask)
{
	u32 timeout = TIMEOUT_SPINS_MAC;

	while (--timeout && netsec_read(priv, addr) & mask)
		cpu_relax();
	if (timeout)
		return 0;

	timeout = TIMEOUT_SECONDARY_MS_MAC;
	while (--timeout && netsec_read(priv, addr) & mask)
		usleep_range(1000, 2000);

	if (timeout)
		return 0;

	netdev_WARN(priv->ndev, "%s: timeout\n", __func__);

	return -ETIMEDOUT;
}

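/* MAC registers are accessed indirectly: the value goes through
 * MAC_REG_DATA while the target offset and direction are latched via
 * MAC_REG_CMD, then we wait for the busy bit to clear.
 */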
static int netsec_mac_write(struct netsec_priv *priv, u32 addr, u32 value)
{
	netsec_write(priv, MAC_REG_DATA, value);
	netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_WRITE);
	return netsec_wait_while_busy(priv,
				      MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY);
}

static int netsec_mac_read(struct netsec_priv *priv, u32 addr, u32 *read)
{
	int ret;

	netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_READ);
	ret = netsec_wait_while_busy(priv,
				     MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY);
	if (ret)
		return ret;

	*read = netsec_read(priv, MAC_REG_DATA);

	return 0;
}

static int netsec_mac_wait_while_busy(struct netsec_priv *priv,
				      u32 addr, u32 mask)
{
	u32 timeout = TIMEOUT_SPINS_MAC;
	u32 data;
	int ret;

	do {
		ret = netsec_mac_read(priv, addr, &data);
		if (ret)
			break;
		cpu_relax();
	} while (--timeout && (data & mask));

	if (timeout)
		return 0;

	timeout = TIMEOUT_SECONDARY_MS_MAC;
	do {
		usleep_range(1000, 2000);

		ret = netsec_mac_read(priv, addr, &data);
		if (ret)
			break;
		cpu_relax();
	} while (--timeout && (data & mask));

	if (timeout && !ret)
		return 0;

	netdev_WARN(priv->ndev, "%s: timeout\n", __func__);

	return -ETIMEDOUT;
}

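/* Program the GMAC MAC configuration register to match the speed,
 * duplex and interface mode the PHY negotiated.
 */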
static int netsec_mac_update_to_phy_state(struct netsec_priv *priv)
{
	struct phy_device *phydev = priv->ndev->phydev;
	u32 value = 0;

	value = phydev->duplex ? NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON :
				 NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON;

	if (phydev->speed != SPEED_1000)
		value |= NETSEC_MCR_PS;

	if (priv->phy_interface != PHY_INTERFACE_MODE_GMII &&
	    phydev->speed == SPEED_100)
		value |= NETSEC_GMAC_MCR_REG_FES;

	value |= NETSEC_GMAC_MCR_REG_CST | NETSEC_GMAC_MCR_REG_JE;

	if (phy_interface_mode_is_rgmii(priv->phy_interface))
		value |= NETSEC_GMAC_MCR_REG_IBN;

	if (netsec_mac_write(priv, GMAC_REG_MCR, value))
		return -ETIMEDOUT;

	return 0;
}

static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr);

static int netsec_phy_write(struct mii_bus *bus,
			    int phy_addr, int reg, u16 val)
{
	int status;
	struct netsec_priv *priv = bus->priv;

	if (netsec_mac_write(priv, GMAC_REG_GDR, val))
		return -ETIMEDOUT;
	if (netsec_mac_write(priv, GMAC_REG_GAR,
			     phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA |
			     reg << NETSEC_GMAC_GAR_REG_SHIFT_GR |
			     NETSEC_GMAC_GAR_REG_GW | NETSEC_GMAC_GAR_REG_GB |
			     (netsec_clk_type(priv->freq) <<
			      GMAC_REG_SHIFT_CR_GAR)))
		return -ETIMEDOUT;

	status = netsec_mac_wait_while_busy(priv, GMAC_REG_GAR,
					    NETSEC_GMAC_GAR_REG_GB);

	/* The Developerbox implements an RTL8211E PHY and there is a
	 * compatibility problem with F_GMAC4.
	 * The RTL8211E expects the MDC clock to keep toggling for several
	 * clock cycles with MDIO high before entering the IDLE state.
	 * To meet this requirement, the netsec driver issues a dummy
	 * read (e.g. of the PHYID1 register at offset 0x2) right after
	 * each write.
	 */
	netsec_phy_read(bus, phy_addr, MII_PHYSID1);

	return status;
}

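/* Issue an MDIO read: the (phy, reg, clock divider) tuple is written to
 * GAR with the busy bit set, and the result is fetched from GDR once the
 * busy bit clears.
 */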
static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr)
{
	struct netsec_priv *priv = bus->priv;
	u32 data;
	int ret;

	if (netsec_mac_write(priv, GMAC_REG_GAR, NETSEC_GMAC_GAR_REG_GB |
			     phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA |
			     reg_addr << NETSEC_GMAC_GAR_REG_SHIFT_GR |
			     (netsec_clk_type(priv->freq) <<
			      GMAC_REG_SHIFT_CR_GAR)))
		return -ETIMEDOUT;

	ret = netsec_mac_wait_while_busy(priv, GMAC_REG_GAR,
					 NETSEC_GMAC_GAR_REG_GB);
	if (ret)
		return ret;

	ret = netsec_mac_read(priv, GMAC_REG_GDR, &data);
	if (ret)
		return ret;

	return data;
}

/************* ETHTOOL_OPS FOLLOW *************/

static void netsec_et_get_drvinfo(struct net_device *net_device,
				  struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "netsec", sizeof(info->driver));
	strlcpy(info->bus_info, dev_name(net_device->dev.parent),
		sizeof(info->bus_info));
}

static int netsec_et_get_coalesce(struct net_device *net_device,
				  struct ethtool_coalesce *et_coalesce)
{
	struct netsec_priv *priv = netdev_priv(net_device);

	*et_coalesce = priv->et_coalesce;

	return 0;
}

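/* Apply the requested interrupt moderation settings, clamping the timers
 * to at least 50 us and the frame counts to at least one frame, then
 * program the TX/RX moderation registers and re-enable the corresponding
 * interrupts.
 */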
static int netsec_et_set_coalesce(struct net_device *net_device,
				  struct ethtool_coalesce *et_coalesce)
{
	struct netsec_priv *priv = netdev_priv(net_device);

	priv->et_coalesce = *et_coalesce;

	if (priv->et_coalesce.tx_coalesce_usecs < 50)
		priv->et_coalesce.tx_coalesce_usecs = 50;
	if (priv->et_coalesce.tx_max_coalesced_frames < 1)
		priv->et_coalesce.tx_max_coalesced_frames = 1;

	netsec_write(priv, NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT,
		     priv->et_coalesce.tx_max_coalesced_frames);
	netsec_write(priv, NETSEC_REG_NRM_TX_TXINT_TMR,
		     priv->et_coalesce.tx_coalesce_usecs);
	netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TXDONE);
	netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TMREXP);

	if (priv->et_coalesce.rx_coalesce_usecs < 50)
		priv->et_coalesce.rx_coalesce_usecs = 50;
	if (priv->et_coalesce.rx_max_coalesced_frames < 1)
		priv->et_coalesce.rx_max_coalesced_frames = 1;

	netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_PKTCNT,
		     priv->et_coalesce.rx_max_coalesced_frames);
	netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_TMR,
		     priv->et_coalesce.rx_coalesce_usecs);
	netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_PKTCNT);
	netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_TMREXP);

	return 0;
}

static u32 netsec_et_get_msglevel(struct net_device *dev)
{
	struct netsec_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void netsec_et_set_msglevel(struct net_device *dev, u32 datum)
{
	struct netsec_priv *priv = netdev_priv(dev);

	priv->msg_enable = datum;
}

static const struct ethtool_ops netsec_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES,
	.get_drvinfo		= netsec_et_get_drvinfo,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	.get_link		= ethtool_op_get_link,
	.get_coalesce		= netsec_et_get_coalesce,
	.set_coalesce		= netsec_et_set_coalesce,
	.get_msglevel		= netsec_et_get_msglevel,
	.set_msglevel		= netsec_et_set_msglevel,
};

/************* NETDEV_OPS FOLLOW *************/

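/* Rearm one RX descriptor: point it at the buffer described by @desc,
 * hand ownership back to the hardware and mirror the bookkeeping into
 * the software ring.
 */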
static void netsec_set_rx_de(struct netsec_priv *priv,
			     struct netsec_desc_ring *dring, u16 idx,
			     const struct netsec_desc *desc)
{
	struct netsec_de *de = dring->vaddr + DESC_SZ * idx;
	u32 attr = (1 << NETSEC_RX_PKT_OWN_FIELD) |
		   (1 << NETSEC_RX_PKT_FS_FIELD) |
		   (1 << NETSEC_RX_PKT_LS_FIELD);

	if (idx == DESC_NUM - 1)
		attr |= (1 << NETSEC_RX_PKT_LD_FIELD);

	de->data_buf_addr_up = upper_32_bits(desc->dma_addr);
	de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
	de->buf_len_info = desc->len;
	de->attr = attr;
	dma_wmb();

	dring->desc[idx].dma_addr = desc->dma_addr;
	dring->desc[idx].addr = desc->addr;
	dring->desc[idx].len = desc->len;
}

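/* Reclaim TX descriptors the hardware has finished with, starting at the
 * ring tail: unmap buffers, free the skb/xdp_frame on the last segment
 * and account the completed packets and bytes. Returns true if any
 * descriptor was cleaned.
 */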
static bool netsec_clean_tx_dring(struct netsec_priv *priv)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
	struct netsec_de *entry;
	int tail = dring->tail;
	unsigned int bytes;
	int cnt = 0;

	spin_lock(&dring->lock);

	bytes = 0;
	entry = dring->vaddr + DESC_SZ * tail;

	while (!(entry->attr & (1U << NETSEC_TX_SHIFT_OWN_FIELD)) &&
	       cnt < DESC_NUM) {
		struct netsec_desc *desc;
		int eop;

		desc = &dring->desc[tail];
		eop = (entry->attr >> NETSEC_TX_LAST) & 1;
		dma_rmb();

		/* if buf_type is either TYPE_NETSEC_SKB or
		 * TYPE_NETSEC_XDP_NDO we mapped it
		 */
		if (desc->buf_type != TYPE_NETSEC_XDP_TX)
			dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
					 DMA_TO_DEVICE);

		if (!eop)
			goto next;

		if (desc->buf_type == TYPE_NETSEC_SKB) {
			bytes += desc->skb->len;
			dev_kfree_skb(desc->skb);
		} else {
			bytes += desc->xdpf->len;
			xdp_return_frame(desc->xdpf);
		}
next:
		/* clean up so netsec_uninit_pkt_dring() won't free the skb
		 * again
		 */
		*desc = (struct netsec_desc){};

		/* entry->attr is not going to be accessed by the NIC until
		 * netsec_set_tx_de() is called. No need for a dma_wmb() here
		 */
		entry->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD;
		/* move tail ahead */
		dring->tail = (tail + 1) % DESC_NUM;

		tail = dring->tail;
		entry = dring->vaddr + DESC_SZ * tail;
		cnt++;
	}

	spin_unlock(&dring->lock);

	if (!cnt)
		return false;

	/* reading the register clears the irq */
	netsec_read(priv, NETSEC_REG_NRM_TX_DONE_PKTCNT);

	priv->ndev->stats.tx_packets += cnt;
	priv->ndev->stats.tx_bytes += bytes;

	netdev_completed_queue(priv->ndev, cnt, bytes);

	return true;
}

static void netsec_process_tx(struct netsec_priv *priv)
{
	struct net_device *ndev = priv->ndev;
	bool cleaned;

	cleaned = netsec_clean_tx_dring(priv);

	if (cleaned && netif_queue_stopped(ndev)) {
		/* Make sure we update the value, anyone stopping the queue
		 * after this will read the proper consumer idx
		 */
		smp_wmb();
		netif_wake_queue(ndev);
	}
}

static void *netsec_alloc_rx_data(struct netsec_priv *priv,
				  dma_addr_t *dma_handle, u16 *desc_len)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
	struct page *page;

	page = page_pool_dev_alloc_pages(dring->page_pool);
	if (!page)
		return NULL;

	/* We allocate the same buffer length for XDP and non-XDP cases.
	 * page_pool API will map the whole page, skip what's needed for
	 * network payloads and/or XDP
	 */
	*dma_handle = page_pool_get_dma_addr(page) + NETSEC_RXBUF_HEADROOM;
	/* Make sure the incoming payload fits in the page for XDP and non-XDP
	 * cases and reserve enough space for headroom + skb_shared_info
	 */
	*desc_len = NETSEC_RX_BUF_SIZE;

	return page_address(page);
}

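/* Hand @num RX descriptors starting at index @from back to the hardware,
 * reusing the buffers already recorded in the software ring.
 */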
static void netsec_rx_fill(struct netsec_priv *priv, u16 from, u16 num)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
	u16 idx = from;

	while (num) {
		netsec_set_rx_de(priv, dring, idx, &dring->desc[idx]);
		idx++;
		if (idx >= DESC_NUM)
			idx = 0;
		num--;
	}
}

static void netsec_xdp_ring_tx_db(struct netsec_priv *priv, u16 pkts)
{
	if (likely(pkts))
		netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, pkts);
}

static void netsec_finalize_xdp_rx(struct netsec_priv *priv, u32 xdp_res,
				   u16 pkts)
{
	if (xdp_res & NETSEC_XDP_REDIR)
		xdp_do_flush_map();

	if (xdp_res & NETSEC_XDP_TX)
		netsec_xdp_ring_tx_db(priv, pkts);
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) static void netsec_set_tx_de(struct netsec_priv *priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 			     struct netsec_desc_ring *dring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 			     const struct netsec_tx_pkt_ctrl *tx_ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 			     const struct netsec_desc *desc, void *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	int idx = dring->head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	struct netsec_de *de;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 	u32 attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	de = dring->vaddr + (DESC_SZ * idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	attr = (1 << NETSEC_TX_SHIFT_OWN_FIELD) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	       (1 << NETSEC_TX_SHIFT_PT_FIELD) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	       (NETSEC_RING_GMAC << NETSEC_TX_SHIFT_TDRID_FIELD) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	       (1 << NETSEC_TX_SHIFT_FS_FIELD) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	       (1 << NETSEC_TX_LAST) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	       (tx_ctrl->cksum_offload_flag << NETSEC_TX_SHIFT_CO) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	       (tx_ctrl->tcp_seg_offload_flag << NETSEC_TX_SHIFT_SO) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	       (1 << NETSEC_TX_SHIFT_TRS_FIELD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	if (idx == DESC_NUM - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 		attr |= (1 << NETSEC_TX_SHIFT_LD_FIELD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	de->data_buf_addr_up = upper_32_bits(desc->dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	de->buf_len_info = (tx_ctrl->tcp_seg_len << 16) | desc->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	de->attr = attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	dring->desc[idx] = *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	if (desc->buf_type == TYPE_NETSEC_SKB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 		dring->desc[idx].skb = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	else if (desc->buf_type == TYPE_NETSEC_XDP_TX ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 		 desc->buf_type == TYPE_NETSEC_XDP_NDO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 		dring->desc[idx].xdpf = buf;
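	/* Editorial note: the skb/xdp_frame pointer is stashed in the shadow
	 * descriptor so the TX completion path can free or recycle the right
	 * object for this slot later.
	 */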
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	/* move head ahead */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	dring->head = (dring->head + 1) % DESC_NUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) /* The current driver only supports 1 Txq, this should run under spin_lock() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) static u32 netsec_xdp_queue_one(struct netsec_priv *priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 				struct xdp_frame *xdpf, bool is_ndo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	struct page *page = virt_to_page(xdpf->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	struct netsec_tx_pkt_ctrl tx_ctrl = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	struct netsec_desc tx_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	dma_addr_t dma_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	u16 filled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	if (tx_ring->head >= tx_ring->tail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 		filled = tx_ring->head - tx_ring->tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 		filled = tx_ring->head + DESC_NUM - tx_ring->tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 
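	/* Editorial note: head/tail index a ring of DESC_NUM entries, so the
	 * occupancy is (head - tail) mod DESC_NUM; the bailout below keeps at
	 * least one slot free so that a full ring is never mistaken for an
	 * empty one.
	 */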
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	if (DESC_NUM - filled <= 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 		return NETSEC_XDP_CONSUMED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	if (is_ndo) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 		/* This buffer comes from ndo_xdp_xmit(); it must be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 		 * DMA-mapped before sending.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 		dma_handle = dma_map_single(priv->dev, xdpf->data, xdpf->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 					    DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 		if (dma_mapping_error(priv->dev, dma_handle))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 			return NETSEC_XDP_CONSUMED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 		tx_desc.buf_type = TYPE_NETSEC_XDP_NDO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 		/* This is the device Rx buffer from the page_pool. No need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 		 * to remap; just sync and send it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 		struct netsec_desc_ring *rx_ring =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 			&priv->desc_ring[NETSEC_RING_RX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 		enum dma_data_direction dma_dir =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 			page_pool_get_dma_dir(rx_ring->page_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 		dma_handle = page_pool_get_dma_addr(page) + xdpf->headroom +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 			sizeof(*xdpf);
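		/* Editorial note: after xdp_convert_buff_to_frame() the
		 * struct xdp_frame is assumed to live at the start of the
		 * buffer, so the payload begins xdpf->headroom + sizeof(*xdpf)
		 * bytes into the same page_pool page.
		 */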
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 		dma_sync_single_for_device(priv->dev, dma_handle, xdpf->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 					   dma_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 		tx_desc.buf_type = TYPE_NETSEC_XDP_TX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	tx_desc.dma_addr = dma_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	tx_desc.addr = xdpf->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	tx_desc.len = xdpf->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	netdev_sent_queue(priv->ndev, xdpf->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	netsec_set_tx_de(priv, tx_ring, &tx_ctrl, &tx_desc, xdpf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	return NETSEC_XDP_TX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) static u32 netsec_xdp_xmit_back(struct netsec_priv *priv, struct xdp_buff *xdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	u32 ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	if (unlikely(!xdpf))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 		return NETSEC_XDP_CONSUMED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	spin_lock(&tx_ring->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	ret = netsec_xdp_queue_one(priv, xdpf, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	spin_unlock(&tx_ring->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 			  struct xdp_buff *xdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	unsigned int sync, len = xdp->data_end - xdp->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	u32 ret = NETSEC_XDP_PASS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	u32 act;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	act = bpf_prog_run_xdp(prog, xdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	/* Due to xdp_adjust_tail: the for_device DMA sync must cover the max len the CPU touched */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	sync = xdp->data_end - xdp->data_hard_start - NETSEC_RXBUF_HEADROOM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	sync = max(sync, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	switch (act) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	case XDP_PASS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 		ret = NETSEC_XDP_PASS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	case XDP_TX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 		ret = netsec_xdp_xmit_back(priv, xdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 		if (ret != NETSEC_XDP_TX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 			page = virt_to_head_page(xdp->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 			page_pool_put_page(dring->page_pool, page, sync, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	case XDP_REDIRECT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 		err = xdp_do_redirect(priv->ndev, xdp, prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 		if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 			ret = NETSEC_XDP_REDIR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 			ret = NETSEC_XDP_CONSUMED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 			page = virt_to_head_page(xdp->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 			page_pool_put_page(dring->page_pool, page, sync, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 		bpf_warn_invalid_xdp_action(act);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	case XDP_ABORTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 		trace_xdp_exception(priv->ndev, prog, act);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 		fallthrough;	/* handle aborts by dropping packet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	case XDP_DROP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 		ret = NETSEC_XDP_CONSUMED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 		page = virt_to_head_page(xdp->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 		page_pool_put_page(dring->page_pool, page, sync, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) static int netsec_process_rx(struct netsec_priv *priv, int budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	struct net_device *ndev = priv->ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	struct netsec_rx_pkt_info rx_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	enum dma_data_direction dma_dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	struct bpf_prog *xdp_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	struct xdp_buff xdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	u16 xdp_xmit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	u32 xdp_act = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	int done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	xdp.rxq = &dring->xdp_rxq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	xdp.frame_sz = PAGE_SIZE;
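	/* Editorial note: each Rx buffer is a whole order-0 page from the
	 * page_pool, so the XDP frame size is PAGE_SIZE rather than
	 * NETSEC_RX_BUF_SIZE.
	 */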
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	xdp_prog = READ_ONCE(priv->xdp_prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	dma_dir = page_pool_get_dma_dir(dring->page_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	while (done < budget) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 		u16 idx = dring->tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 		struct netsec_de *de = dring->vaddr + (DESC_SZ * idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 		struct netsec_desc *desc = &dring->desc[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 		struct page *page = virt_to_page(desc->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 		u32 xdp_result = NETSEC_XDP_PASS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 		struct sk_buff *skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 		u16 pkt_len, desc_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 		dma_addr_t dma_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 		void *buf_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 		if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 			/* reading the register clears the irq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 			netsec_read(priv, NETSEC_REG_NRM_RX_PKTCNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 		/* This barrier is needed to keep us from reading
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 		 * any other fields out of the netsec_de until we have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 		 * verified the descriptor has been written back
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 		dma_rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 		done++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 		pkt_len = de->buf_len_info >> 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		rx_info.err_code = (de->attr >> NETSEC_RX_PKT_ERR_FIELD) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 			NETSEC_RX_PKT_ERR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 		rx_info.err_flag = (de->attr >> NETSEC_RX_PKT_ER_FIELD) & 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 		if (rx_info.err_flag) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 			netif_err(priv, drv, priv->ndev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 				  "%s: rx fail err(%d)\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 				  rx_info.err_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 			ndev->stats.rx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 			dring->tail = (dring->tail + 1) % DESC_NUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 			/* reuse buffer page frag */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 			netsec_rx_fill(priv, idx, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 		rx_info.rx_cksum_result =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 			(de->attr >> NETSEC_RX_PKT_CO_FIELD) & 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 		/* Allocate a fresh buffer and map it for the hardware; it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 		 * will replace the old buffer in this descriptor slot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 		buf_addr = netsec_alloc_rx_data(priv, &dma_handle, &desc_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 		if (unlikely(!buf_addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 		dma_sync_single_for_cpu(priv->dev, desc->dma_addr, pkt_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 					dma_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 		prefetch(desc->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 		xdp.data_hard_start = desc->addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 		xdp.data = desc->addr + NETSEC_RXBUF_HEADROOM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 		xdp_set_data_meta_invalid(&xdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 		xdp.data_end = xdp.data + pkt_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 		if (xdp_prog) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 			xdp_result = netsec_run_xdp(priv, xdp_prog, &xdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 			if (xdp_result != NETSEC_XDP_PASS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 				xdp_act |= xdp_result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 				if (xdp_result == NETSEC_XDP_TX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 					xdp_xmit++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 				goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 		skb = build_skb(desc->addr, desc->len + NETSEC_RX_BUF_NON_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 		if (unlikely(!skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 			/* If building the skb fails, the direct-recycle put
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 			 * below either unmaps and frees the page or refills
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 			 * the cache, depending on the cache state. Since we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 			 * paid the allocation cost, prefer refilling the cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 			page_pool_put_page(dring->page_pool, page, pkt_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 					   true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 			netif_err(priv, drv, priv->ndev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 				  "rx failed to build skb\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 		page_pool_release_page(dring->page_pool, page);
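		/* Editorial note: the page now belongs to the skb, so it is
		 * unmapped and dropped from page_pool accounting instead of
		 * being recycled; normal skb freeing will release it.
		 */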
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 		skb_reserve(skb, xdp.data - xdp.data_hard_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 		skb_put(skb, xdp.data_end - xdp.data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 		skb->protocol = eth_type_trans(skb, priv->ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 		if (priv->rx_cksum_offload_flag &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 		    rx_info.rx_cksum_result == NETSEC_RX_CKSUM_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 			skb->ip_summed = CHECKSUM_UNNECESSARY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) next:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 		if (skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 			napi_gro_receive(&priv->napi, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 		if (skb || xdp_result) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 			ndev->stats.rx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 			ndev->stats.rx_bytes += xdp.data_end - xdp.data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 		/* Update the descriptor with fresh buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 		desc->len = desc_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 		desc->dma_addr = dma_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 		desc->addr = buf_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 
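		/* Editorial note: with the fresh buffer installed in the
		 * shadow descriptor, hand the slot back to the hardware and
		 * advance the software tail.
		 */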
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 		netsec_rx_fill(priv, idx, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 		dring->tail = (dring->tail + 1) % DESC_NUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	netsec_finalize_xdp_rx(priv, xdp_act, xdp_xmit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	return done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) static int netsec_napi_poll(struct napi_struct *napi, int budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	struct netsec_priv *priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	int done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	priv = container_of(napi, struct netsec_priv, napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	netsec_process_tx(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	done = netsec_process_rx(priv, budget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	if (done < budget && napi_complete_done(napi, done)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 		unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 		spin_lock_irqsave(&priv->reglock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 		netsec_write(priv, NETSEC_REG_INTEN_SET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 			     NETSEC_IRQ_RX | NETSEC_IRQ_TX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 		spin_unlock_irqrestore(&priv->reglock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	return done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) static int netsec_desc_used(struct netsec_desc_ring *dring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	int used;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	if (dring->head >= dring->tail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 		used = dring->head - dring->tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 		used = dring->head + DESC_NUM - dring->tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	return used;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) static int netsec_check_stop_tx(struct netsec_priv *priv, int used)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	/* stop the queue while fewer than two free descriptors remain */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	if (DESC_NUM - used < 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 		netif_stop_queue(priv->ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 		/* Make sure we read the updated value in case
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 		 * descriptors got freed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 		smp_rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 		used = netsec_desc_used(dring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 		if (DESC_NUM - used < 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 			return NETDEV_TX_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 		netif_wake_queue(priv->ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 					    struct net_device *ndev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	struct netsec_priv *priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	struct netsec_tx_pkt_ctrl tx_ctrl = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	struct netsec_desc tx_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	u16 tso_seg_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	int filled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	spin_lock_bh(&dring->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	filled = netsec_desc_used(dring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	if (netsec_check_stop_tx(priv, filled)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 		spin_unlock_bh(&dring->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 		net_warn_ratelimited("%s %s Tx queue full\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 				     dev_name(priv->dev), ndev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 		return NETDEV_TX_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	if (skb->ip_summed == CHECKSUM_PARTIAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 		tx_ctrl.cksum_offload_flag = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	if (skb_is_gso(skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 		tso_seg_len = skb_shinfo(skb)->gso_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	if (tso_seg_len > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 		if (skb->protocol == htons(ETH_P_IP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 			ip_hdr(skb)->tot_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 			tcp_hdr(skb)->check =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 				~tcp_v4_check(0, ip_hdr(skb)->saddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 					      ip_hdr(skb)->daddr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 			tcp_v6_gso_csum_prep(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 		tx_ctrl.tcp_seg_offload_flag = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 		tx_ctrl.tcp_seg_len = tso_seg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	}
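	/* Editorial note: for TSO the hardware is assumed to rewrite tot_len
	 * and the final TCP checksum per segment, so the stack only seeds
	 * tcp->check with the pseudo-header sum (computed with length 0).
	 */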
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	tx_desc.dma_addr = dma_map_single(priv->dev, skb->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 					  skb_headlen(skb), DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	if (dma_mapping_error(priv->dev, tx_desc.dma_addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 		spin_unlock_bh(&dring->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 		netif_err(priv, drv, priv->ndev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 			  "%s: DMA mapping failed\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 		ndev->stats.tx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 		dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 		return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	tx_desc.addr = skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	tx_desc.len = skb_headlen(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	tx_desc.buf_type = TYPE_NETSEC_SKB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	skb_tx_timestamp(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	netdev_sent_queue(priv->ndev, skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	netsec_set_tx_de(priv, dring, &tx_ctrl, &tx_desc, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	spin_unlock_bh(&dring->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, 1); /* submit another tx */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	struct netsec_desc_ring *dring = &priv->desc_ring[id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	struct netsec_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	u16 idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	if (!dring->vaddr || !dring->desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	for (idx = 0; idx < DESC_NUM; idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 		desc = &dring->desc[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 		if (!desc->addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 		if (id == NETSEC_RING_RX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 			struct page *page = virt_to_page(desc->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 			page_pool_put_full_page(dring->page_pool, page, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 		} else if (id == NETSEC_RING_TX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 			dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 					 DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 			dev_kfree_skb(desc->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	/* Rx is currently using page_pool */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	if (id == NETSEC_RING_RX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 		if (xdp_rxq_info_is_reg(&dring->xdp_rxq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 			xdp_rxq_info_unreg(&dring->xdp_rxq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 		page_pool_destroy(dring->page_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	memset(dring->desc, 0, sizeof(struct netsec_desc) * DESC_NUM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	memset(dring->vaddr, 0, DESC_SZ * DESC_NUM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	dring->head = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	dring->tail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	if (id == NETSEC_RING_TX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 		netdev_reset_queue(priv->ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) static void netsec_free_dring(struct netsec_priv *priv, int id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	struct netsec_desc_ring *dring = &priv->desc_ring[id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 	if (dring->vaddr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 		dma_free_coherent(priv->dev, DESC_SZ * DESC_NUM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 				  dring->vaddr, dring->desc_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 		dring->vaddr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	kfree(dring->desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	dring->desc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) static int netsec_alloc_dring(struct netsec_priv *priv, enum ring_id id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	struct netsec_desc_ring *dring = &priv->desc_ring[id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 	dring->vaddr = dma_alloc_coherent(priv->dev, DESC_SZ * DESC_NUM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 					  &dring->desc_dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	if (!dring->vaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	dring->desc = kcalloc(DESC_NUM, sizeof(*dring->desc), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	if (!dring->desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	netsec_free_dring(priv, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) static void netsec_setup_tx_dring(struct netsec_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	for (i = 0; i < DESC_NUM; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 		struct netsec_de *de;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 		de = dring->vaddr + (DESC_SZ * i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 		/* de->attr is not going to be accessed by the NIC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 		 * until netsec_set_tx_de() is called.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 		 * No need for a dma_wmb() here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 		de->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) static int netsec_setup_rx_dring(struct netsec_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	struct bpf_prog *xdp_prog = READ_ONCE(priv->xdp_prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	struct page_pool_params pp_params = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 		.order = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 		/* internal DMA mapping in page_pool */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 		.pool_size = DESC_NUM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 		.nid = NUMA_NO_NODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 		.dev = priv->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 		.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 		.offset = NETSEC_RXBUF_HEADROOM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 		.max_len = NETSEC_RX_BUF_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	};
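	/* Editorial note: PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV delegate
	 * mapping and for-device syncs to the pool itself; dma_dir must be
	 * bidirectional whenever an XDP program may transmit out of Rx pages.
	 */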
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	int i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	dring->page_pool = page_pool_create(&pp_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	if (IS_ERR(dring->page_pool)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 		err = PTR_ERR(dring->page_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 		dring->page_pool = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 		goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	err = xdp_rxq_info_reg(&dring->xdp_rxq, priv->ndev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 		goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	err = xdp_rxq_info_reg_mem_model(&dring->xdp_rxq, MEM_TYPE_PAGE_POOL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 					 dring->page_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 		goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	for (i = 0; i < DESC_NUM; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 		struct netsec_desc *desc = &dring->desc[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 		dma_addr_t dma_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 		void *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 		u16 len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 		buf = netsec_alloc_rx_data(priv, &dma_handle, &len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 		if (!buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 			err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 			goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 		desc->dma_addr = dma_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 		desc->addr = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 		desc->len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 	netsec_rx_fill(priv, 0, DESC_NUM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) static int netsec_netdev_load_ucode_region(struct netsec_priv *priv, u32 reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 					   u32 addr_h, u32 addr_l, u32 size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	u64 base = (u64)addr_h << 32 | addr_l;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	void __iomem *ucode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	u32 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	ucode = ioremap(base, size * sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 	if (!ucode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	for (i = 0; i < size; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 		netsec_write(priv, reg, readl(ucode + i * 4));
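	/* Editorial note: the microengine is assumed to consume its image one
	 * 32-bit word at a time through this single command-buffer register,
	 * hence the repeated writes to the same offset.
	 */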
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	iounmap(ucode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) static int netsec_netdev_load_microcode(struct netsec_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	u32 addr_h, addr_l, size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_H);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_L);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	size = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_HM_CMD_BUF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 					      addr_h, addr_l, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 	addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_H);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 	addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_L);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	size = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_MH_CMD_BUF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 					      addr_h, addr_l, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	addr_h = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_ADDRESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 	size = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 	err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_PKT_CMD_BUF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 					      addr_h, addr_l, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) static int netsec_reset_hardware(struct netsec_priv *priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 				 bool load_ucode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	u32 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 	/* stop DMA engines */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 	if (!netsec_read(priv, NETSEC_REG_ADDR_DIS_CORE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 		netsec_write(priv, NETSEC_REG_DMA_HM_CTRL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 			     NETSEC_DMA_CTRL_REG_STOP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 		netsec_write(priv, NETSEC_REG_DMA_MH_CTRL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 			     NETSEC_DMA_CTRL_REG_STOP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 		while (netsec_read(priv, NETSEC_REG_DMA_HM_CTRL) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 		       NETSEC_DMA_CTRL_REG_STOP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 			cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 		while (netsec_read(priv, NETSEC_REG_DMA_MH_CTRL) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 		       NETSEC_DMA_CTRL_REG_STOP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 			cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 	netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RUN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	netsec_write(priv, NETSEC_REG_COM_INIT, NETSEC_COM_INIT_REG_ALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 	while (netsec_read(priv, NETSEC_REG_COM_INIT) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 		cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 	/* set desc_start addr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 	netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_UP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 		     upper_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 	netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_LW,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 		     lower_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_UP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 		     upper_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_LW,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 		     lower_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 	/* set normal Tx/Rx descriptor ring config */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 	netsec_write(priv, NETSEC_REG_NRM_TX_CONFIG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 		     1 << NETSEC_REG_DESC_ENDIAN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	netsec_write(priv, NETSEC_REG_NRM_RX_CONFIG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 		     1 << NETSEC_REG_DESC_ENDIAN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	if (load_ucode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 		err = netsec_netdev_load_microcode(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 			netif_err(priv, probe, priv->ndev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 				  "%s: failed to load microcode (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 				  __func__, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 	/* start DMA engines */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	netsec_write(priv, NETSEC_REG_DMA_TMR_CTRL, priv->freq / 1000000 - 1);
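	/* Editorial note: priv->freq / 1000000 - 1 presumably programs the
	 * DMA timer prescaler for a 1 us tick from the core clock rate.
	 */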
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 	netsec_write(priv, NETSEC_REG_ADDR_DIS_CORE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 	usleep_range(1000, 2000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	if (!(netsec_read(priv, NETSEC_REG_TOP_STATUS) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 	      NETSEC_TOP_IRQ_REG_CODE_LOAD_END)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 		netif_err(priv, probe, priv->ndev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 			  "microengine start failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 		return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 	netsec_write(priv, NETSEC_REG_TOP_STATUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 		     NETSEC_TOP_IRQ_REG_CODE_LOAD_END);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	value = NETSEC_PKT_CTRL_REG_MODE_NRM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 	if (priv->ndev->mtu > ETH_DATA_LEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 		value |= NETSEC_PKT_CTRL_REG_EN_JUMBO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 	/* change to normal mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 	netsec_write(priv, NETSEC_REG_DMA_MH_CTRL, MH_CTRL__MODE_TRANS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	netsec_write(priv, NETSEC_REG_PKT_CTRL, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 	while ((netsec_read(priv, NETSEC_REG_MODE_TRANS_COMP_STATUS) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 		NETSEC_MODE_TRANS_COMP_IRQ_T2N) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 		cpu_relax();
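	/* Editorial note: T2N presumably flags the standby-to-normal mode
	 * transition; the driver spins until the microengines acknowledge
	 * the switch requested via MH_CTRL__MODE_TRANS.
	 */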
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	/* clear any pending EMPTY/ERR irq status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 	netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, ~0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 	/* Disable TX & RX intr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 	netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) static int netsec_start_gmac(struct netsec_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 	struct phy_device *phydev = priv->ndev->phydev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 	u32 value = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 	if (phydev->speed != SPEED_1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 		value = (NETSEC_GMAC_MCR_REG_CST |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 			 NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	if (netsec_mac_write(priv, GMAC_REG_MCR, value))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 		return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 	if (netsec_mac_write(priv, GMAC_REG_BMR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 			     NETSEC_GMAC_BMR_REG_RESET))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 		return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 	/* Wait for the soft reset to complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	usleep_range(1000, 5000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 	ret = netsec_mac_read(priv, GMAC_REG_BMR, &value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 	if (value & NETSEC_GMAC_BMR_REG_SWR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 		return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 	netsec_write(priv, MAC_REG_DESC_SOFT_RST, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	if (netsec_wait_while_busy(priv, MAC_REG_DESC_SOFT_RST, 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 		return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 	netsec_write(priv, MAC_REG_DESC_INIT, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	if (netsec_wait_while_busy(priv, MAC_REG_DESC_INIT, 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 		return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	if (netsec_mac_write(priv, GMAC_REG_BMR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 			     NETSEC_GMAC_BMR_REG_COMMON))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 		return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	if (netsec_mac_write(priv, GMAC_REG_RDLAR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 			     NETSEC_GMAC_RDLAR_REG_COMMON))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 		return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 	if (netsec_mac_write(priv, GMAC_REG_TDLAR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 			     NETSEC_GMAC_TDLAR_REG_COMMON))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 		return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 	if (netsec_mac_write(priv, GMAC_REG_MFFR, 0x80000001))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 		return -ETIMEDOUT;
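	/* Editorial note: on this Synopsys-style GMAC, 0x80000001 in the
	 * frame filter register presumably sets "receive all" (bit 31) plus
	 * promiscuous mode (bit 0), leaving filtering to the NETSEC engine.
	 */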
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 	ret = netsec_mac_update_to_phy_state(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 	ret = netsec_mac_read(priv, GMAC_REG_OMR, &value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 	value |= NETSEC_GMAC_OMR_REG_SR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	value |= NETSEC_GMAC_OMR_REG_ST;
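	/* Editorial note: SR and ST presumably start the MAC receive and
	 * transmit paths, mirroring the common DesignWare GMAC operation
	 * mode register layout.
	 */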
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 	netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 	netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	netsec_et_set_coalesce(priv->ndev, &priv->et_coalesce);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	if (netsec_mac_write(priv, GMAC_REG_OMR, value))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 		return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 
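/* Halt the GMAC TX/RX engines and mask all normal-path interrupts;
 * called on link loss and on interface shutdown.
 */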
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) static int netsec_stop_gmac(struct netsec_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 	u32 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 	ret = netsec_mac_read(priv, GMAC_REG_OMR, &value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 	value &= ~NETSEC_GMAC_OMR_REG_SR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 	value &= ~NETSEC_GMAC_OMR_REG_ST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 	/* disable all interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 	netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 	netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 	return netsec_mac_write(priv, GMAC_REG_OMR, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 
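/* phylib adjust_link callback: start or stop the GMAC to track the PHY
 * link state, then log the transition.
 */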
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) static void netsec_phy_adjust_link(struct net_device *ndev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 	struct netsec_priv *priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	if (ndev->phydev->link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 		netsec_start_gmac(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 		netsec_stop_gmac(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 	phy_print_status(ndev->phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 
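/* Top-level IRQ handler: acknowledge the per-ring status registers,
 * mask TX/RX interrupts under reglock and defer the real work to NAPI.
 */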
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) static irqreturn_t netsec_irq_handler(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 	struct netsec_priv *priv = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 	u32 val, status = netsec_read(priv, NETSEC_REG_TOP_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 	/* Ack any pending TX/RX status, then mask both interrupt sources */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 	if (status & NETSEC_IRQ_TX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 		val = netsec_read(priv, NETSEC_REG_NRM_TX_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 		netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 	if (status & NETSEC_IRQ_RX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 		val = netsec_read(priv, NETSEC_REG_NRM_RX_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 		netsec_write(priv, NETSEC_REG_NRM_RX_STATUS, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 	spin_lock_irqsave(&priv->reglock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 	netsec_write(priv, NETSEC_REG_INTEN_CLR, NETSEC_IRQ_RX | NETSEC_IRQ_TX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 	spin_unlock_irqrestore(&priv->reglock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 	napi_schedule(&priv->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 
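/* ndo_open: take a runtime PM reference, set up the descriptor rings
 * and the shared IRQ, attach the PHY (via the DT phandle or the
 * ACPI-probed phydev), then start the GMAC, NAPI and the TX queue
 * before unmasking interrupts.
 */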
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) static int netsec_netdev_open(struct net_device *ndev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 	struct netsec_priv *priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 	pm_runtime_get_sync(priv->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 	netsec_setup_tx_dring(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 	ret = netsec_setup_rx_dring(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 		netif_err(priv, probe, priv->ndev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 			  "%s: failed to set up the RX ring\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 		goto err1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	ret = request_irq(priv->ndev->irq, netsec_irq_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 			  IRQF_SHARED, "netsec", priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 		netif_err(priv, drv, priv->ndev, "request_irq failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 		goto err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 	if (dev_of_node(priv->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 		if (!of_phy_connect(priv->ndev, priv->phy_np,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 				    netsec_phy_adjust_link, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 				    priv->phy_interface)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 			netif_err(priv, link, priv->ndev, "missing PHY\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 			ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 			goto err3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 		ret = phy_connect_direct(priv->ndev, priv->phydev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 					 netsec_phy_adjust_link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 					 priv->phy_interface);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 			netif_err(priv, link, priv->ndev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 				  "phy_connect_direct() failed (%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 			goto err3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 	phy_start(ndev->phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 	netsec_start_gmac(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 	napi_enable(&priv->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 	netif_start_queue(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 	/* Enable TX and RX interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 	netsec_write(priv, NETSEC_REG_INTEN_SET, NETSEC_IRQ_RX | NETSEC_IRQ_TX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) err3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 	free_irq(priv->ndev->irq, priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) err2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 	netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) err1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	pm_runtime_put_sync(priv->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 
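/* ndo_stop: quiesce the TX queue and NAPI, mask every interrupt, stop
 * the GMAC, release the IRQ and both rings, detach the PHY and reset
 * the engine before dropping the runtime PM reference from ndo_open.
 */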
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) static int netsec_netdev_stop(struct net_device *ndev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 	struct netsec_priv *priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 	netif_stop_queue(priv->ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 	dma_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 	napi_disable(&priv->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 	netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 	netsec_stop_gmac(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 	free_irq(priv->ndev->irq, priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 	netsec_uninit_pkt_dring(priv, NETSEC_RING_TX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 	netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 	phy_stop(ndev->phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 	phy_disconnect(ndev->phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 	ret = netsec_reset_hardware(priv, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 	pm_runtime_put_sync(priv->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 
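/* ndo_init: allocate both descriptor rings, run the initial hardware
 * reset with the PHY temporarily powered down, then restore the PHY's
 * power state and initialize the per-ring locks.
 */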
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) static int netsec_netdev_init(struct net_device *ndev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 	struct netsec_priv *priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 	u16 data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 	BUILD_BUG_ON_NOT_POWER_OF_2(DESC_NUM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 	ret = netsec_alloc_dring(priv, NETSEC_RING_TX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 	ret = netsec_alloc_dring(priv, NETSEC_RING_RX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 		goto err1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 	/* Power the PHY down across the hardware reset below */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 	data = netsec_phy_read(priv->mii_bus, priv->phy_addr, MII_BMCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 	netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 			 data | BMCR_PDOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 	ret = netsec_reset_hardware(priv, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 		goto err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 	/* Restore phy power state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 	netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 	spin_lock_init(&priv->desc_ring[NETSEC_RING_TX].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 	spin_lock_init(&priv->desc_ring[NETSEC_RING_RX].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) err2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 	netsec_free_dring(priv, NETSEC_RING_RX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) err1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 	netsec_free_dring(priv, NETSEC_RING_TX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 
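/* ndo_uninit: release the descriptor rings allocated in ndo_init. */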
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) static void netsec_netdev_uninit(struct net_device *ndev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 	struct netsec_priv *priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 	netsec_free_dring(priv, NETSEC_RING_RX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 	netsec_free_dring(priv, NETSEC_RING_TX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 
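/* ndo_set_features: cache the RXCSUM setting for the RX fast path. */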
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) static int netsec_netdev_set_features(struct net_device *ndev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 				      netdev_features_t features)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 	struct netsec_priv *priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 	priv->rx_cksum_offload_flag = !!(features & NETIF_F_RXCSUM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 
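/* ndo_xdp_xmit: queue up to @n frames on the TX ring under the ring
 * lock, freeing any frame the ring cannot take, and ring the TX
 * doorbell once at the end when XDP_XMIT_FLUSH is set. Returns the
 * number of frames actually queued.
 */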
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) static int netsec_xdp_xmit(struct net_device *ndev, int n,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 			   struct xdp_frame **frames, u32 flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 	struct netsec_priv *priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 	struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 	int drops = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 	spin_lock(&tx_ring->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 	for (i = 0; i < n; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 		struct xdp_frame *xdpf = frames[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 		int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 		err = netsec_xdp_queue_one(priv, xdpf, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 		if (err != NETSEC_XDP_TX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 			xdp_return_frame_rx_napi(xdpf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 			drops++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 			tx_ring->xdp_xmit++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 	spin_unlock(&tx_ring->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 	if (unlikely(flags & XDP_XMIT_FLUSH)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 		netsec_xdp_ring_tx_db(priv, tx_ring->xdp_xmit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 		tx_ring->xdp_xmit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 	return n - drops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 
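/* Attach or detach an XDP program. The interface is brought down and
 * back up around the swap, and MTUs above 1500 are rejected while a
 * program is installed.
 */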
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) static int netsec_xdp_setup(struct netsec_priv *priv, struct bpf_prog *prog,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 			    struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 	struct net_device *dev = priv->ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 	struct bpf_prog *old_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 	/* For now, support only standard MTU-sized frames */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 	if (prog && dev->mtu > 1500) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 		NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported on XDP");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 	if (netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 		netsec_netdev_stop(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 	/* Detach old prog, if any */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 	old_prog = xchg(&priv->xdp_prog, prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 	if (old_prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 		bpf_prog_put(old_prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 	if (netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 		netsec_netdev_open(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) static int netsec_xdp(struct net_device *ndev, struct netdev_bpf *xdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 	struct netsec_priv *priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 	switch (xdp->command) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 	case XDP_SETUP_PROG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 		return netsec_xdp_setup(priv, xdp->prog, xdp->extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) static const struct net_device_ops netsec_netdev_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 	.ndo_init		= netsec_netdev_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 	.ndo_uninit		= netsec_netdev_uninit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 	.ndo_open		= netsec_netdev_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 	.ndo_stop		= netsec_netdev_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 	.ndo_start_xmit		= netsec_netdev_start_xmit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 	.ndo_set_features	= netsec_netdev_set_features,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 	.ndo_set_mac_address    = eth_mac_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 	.ndo_validate_addr	= eth_validate_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 	.ndo_do_ioctl		= phy_do_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 	.ndo_xdp_xmit		= netsec_xdp_xmit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 	.ndo_bpf		= netsec_xdp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 
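/* DT probe path: phy-mode, phy-handle and the MDIO address come from
 * the device tree; the PHY reference clock rate is read from the clock
 * bound to the device.
 */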
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) static int netsec_of_probe(struct platform_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 			   struct netsec_priv *priv, u32 *phy_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 	err = of_get_phy_mode(pdev->dev.of_node, &priv->phy_interface);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 		dev_err(&pdev->dev, "missing required property 'phy-mode'\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 	priv->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 	if (!priv->phy_np) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 		dev_err(&pdev->dev, "missing required property 'phy-handle'\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 	*phy_addr = of_mdio_parse_addr(&pdev->dev, priv->phy_np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 	priv->clk = devm_clk_get(&pdev->dev, NULL); /* the DT 'phy_ref_clk' */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 	if (IS_ERR(priv->clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 		dev_err(&pdev->dev, "phy_ref_clk not found\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 		return PTR_ERR(priv->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 	priv->freq = clk_get_rate(priv->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 
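/* ACPI probe path: the PHY address and its reference clock frequency
 * are read from device properties rather than from a clock provider.
 */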
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) static int netsec_acpi_probe(struct platform_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 			     struct netsec_priv *priv, u32 *phy_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 	if (!IS_ENABLED(CONFIG_ACPI))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 	/* ACPI systems are assumed to configure the PHY in firmware, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 	 * there is really no need to discover the PHY mode from the DSDT.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 	 * Since firmware is known to exist in the field that configures the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 	 * PHY correctly but passes the wrong mode string in the phy-mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 	 * device property, we have no choice but to ignore it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 	priv->phy_interface = PHY_INTERFACE_MODE_NA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 	ret = device_property_read_u32(&pdev->dev, "phy-channel", phy_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 		dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 			"missing required property 'phy-channel'\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 	ret = device_property_read_u32(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 				       "socionext,phy-clock-frequency",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 				       &priv->freq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 		dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 			"missing required property 'socionext,phy-clock-frequency'\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 
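/* Tear down the MDIO bus; on ACPI systems the manually registered PHY
 * device is removed and freed first.
 */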
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) static void netsec_unregister_mdio(struct netsec_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 	struct phy_device *phydev = priv->phydev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 	if (!dev_of_node(priv->dev) && phydev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 		phy_device_remove(phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 		phy_device_free(phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 	mdiobus_unregister(priv->mii_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 
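/* Register the MDIO bus. On DT systems the bus is populated from the
 * "mdio" subnode (or the device node itself for older firmware); on
 * ACPI systems auto-probing is masked off and the single PHY at
 * @phy_addr is created and registered by hand.
 */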
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) static int netsec_register_mdio(struct netsec_priv *priv, u32 phy_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 	struct mii_bus *bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 	bus = devm_mdiobus_alloc(priv->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 	if (!bus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 	snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(priv->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 	bus->priv = priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 	bus->name = "SNI NETSEC MDIO";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 	bus->read = netsec_phy_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 	bus->write = netsec_phy_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 	bus->parent = priv->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 	priv->mii_bus = bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 	if (dev_of_node(priv->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 		struct device_node *mdio_node, *parent = dev_of_node(priv->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 		mdio_node = of_get_child_by_name(parent, "mdio");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 		if (mdio_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 			parent = mdio_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 			/* Older firmware doesn't populate the mdio subnode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 			 * fall back to the device node so the firmware can be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 			 * upgraded in due time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 			dev_info(priv->dev, "Upgrade f/w for mdio subnode!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 		ret = of_mdiobus_register(bus, parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 		of_node_put(mdio_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 			dev_err(priv->dev, "mdiobus register err(%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 		/* Mask out all PHYs from auto probing. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 		bus->phy_mask = ~0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 		ret = mdiobus_register(bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 			dev_err(priv->dev, "mdiobus register err(%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 		priv->phydev = get_phy_device(bus, phy_addr, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 		if (IS_ERR(priv->phydev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 			ret = PTR_ERR(priv->phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 			dev_err(priv->dev, "get_phy_device err(%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 			priv->phydev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 			return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 		ret = phy_device_register(priv->phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 			mdiobus_unregister(bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 			dev_err(priv->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 				"phy_device_register err(%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 
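/* Probe: map the register and EEPROM windows, recover the MAC address
 * (device property, then EEPROM, then a random one), gather PHY and
 * clock parameters from DT or ACPI, check for F_TAIKI hardware and
 * register NAPI, the MDIO bus and the net_device.
 */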
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) static int netsec_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 	struct resource *mmio_res, *eeprom_res, *irq_res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 	u8 *mac, macbuf[ETH_ALEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 	struct netsec_priv *priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 	u32 hw_ver, phy_addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 	struct net_device *ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 	mmio_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 	if (!mmio_res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 		dev_err(&pdev->dev, "No MMIO resource found.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 	eeprom_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 	if (!eeprom_res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 		dev_info(&pdev->dev, "No EEPROM resource found.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 	if (!irq_res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 		dev_err(&pdev->dev, "No IRQ resource found.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 	ndev = alloc_etherdev(sizeof(*priv));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 	if (!ndev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 	priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 	spin_lock_init(&priv->reglock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 	SET_NETDEV_DEV(ndev, &pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 	platform_set_drvdata(pdev, priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 	ndev->irq = irq_res->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 	priv->dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 	priv->ndev = ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 	priv->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 			   NETIF_MSG_LINK | NETIF_MSG_PROBE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 	priv->ioaddr = devm_ioremap(&pdev->dev, mmio_res->start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 				    resource_size(mmio_res));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 	if (!priv->ioaddr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 		dev_err(&pdev->dev, "devm_ioremap() failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 		ret = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 		goto free_ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 	priv->eeprom_base = devm_ioremap(&pdev->dev, eeprom_res->start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 					 resource_size(eeprom_res));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 	if (!priv->eeprom_base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 		dev_err(&pdev->dev, "devm_ioremap() failed for EEPROM\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 		ret = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 		goto free_ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 	mac = device_get_mac_address(&pdev->dev, macbuf, sizeof(macbuf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 	if (mac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 		ether_addr_copy(ndev->dev_addr, mac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 	if (priv->eeprom_base &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 	    (!mac || !is_valid_ether_addr(ndev->dev_addr))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 		void __iomem *macp = priv->eeprom_base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 					NETSEC_EEPROM_MAC_ADDRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 
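		/* The MAC address is stored byte-swapped within two 32-bit
		 * words of the EEPROM, hence the swizzled reads below.
		 */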
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 		ndev->dev_addr[0] = readb(macp + 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 		ndev->dev_addr[1] = readb(macp + 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 		ndev->dev_addr[2] = readb(macp + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 		ndev->dev_addr[3] = readb(macp + 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 		ndev->dev_addr[4] = readb(macp + 7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 		ndev->dev_addr[5] = readb(macp + 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 	if (!is_valid_ether_addr(ndev->dev_addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 		dev_warn(&pdev->dev, "No MAC address found, using random\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 		eth_hw_addr_random(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 	if (dev_of_node(&pdev->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 		ret = netsec_of_probe(pdev, priv, &phy_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 		ret = netsec_acpi_probe(pdev, priv, &phy_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 		goto free_ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 	priv->phy_addr = phy_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 	if (!priv->freq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 		dev_err(&pdev->dev, "missing PHY reference clock frequency\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 		ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 		goto free_ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 	/* Coalescing defaults tuned for throughput */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 	priv->et_coalesce.rx_coalesce_usecs = 500;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 	priv->et_coalesce.rx_max_coalesced_frames = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 	priv->et_coalesce.tx_coalesce_usecs = 500;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 	priv->et_coalesce.tx_max_coalesced_frames = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 	ret = device_property_read_u32(&pdev->dev, "max-frame-size",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 				       &ndev->max_mtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 		ndev->max_mtu = ETH_DATA_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 	/* Hold a runtime PM reference across probe; open/close take their own */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 	pm_runtime_enable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 	pm_runtime_get_sync(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 	hw_ver = netsec_read(priv, NETSEC_REG_F_TAIKI_VER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 	/* this driver only supports F_TAIKI style NETSEC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 	if (NETSEC_F_NETSEC_VER_MAJOR_NUM(hw_ver) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 	    NETSEC_F_NETSEC_VER_MAJOR_NUM(NETSEC_REG_NETSEC_VER_F_TAIKI)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 		ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 		goto pm_disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 	dev_info(&pdev->dev, "hardware revision %d.%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 		 hw_ver >> 16, hw_ver & 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 	netif_napi_add(ndev, &priv->napi, netsec_napi_poll, NAPI_POLL_WEIGHT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 	ndev->netdev_ops = &netsec_netdev_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 	ndev->ethtool_ops = &netsec_ethtool_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 	ndev->features |= NETIF_F_HIGHDMA | NETIF_F_RXCSUM | NETIF_F_GSO |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 				NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 	ndev->hw_features = ndev->features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 	priv->rx_cksum_offload_flag = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 	ret = netsec_register_mdio(priv, phy_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 		goto unreg_napi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 		dev_warn(&pdev->dev, "Failed to set DMA mask\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 	ret = register_netdev(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 		netif_err(priv, probe, ndev, "register_netdev() failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 		goto unreg_mii;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 	pm_runtime_put_sync(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) unreg_mii:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 	netsec_unregister_mdio(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) unreg_napi:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 	netif_napi_del(&priv->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) pm_disable:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 	pm_runtime_put_sync(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 	pm_runtime_disable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) free_ndev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 	free_netdev(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 	dev_err(&pdev->dev, "init failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 
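/* Remove: unwind everything done in probe, in reverse order. */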
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) static int netsec_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 	struct netsec_priv *priv = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 	unregister_netdev(priv->ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 	netsec_unregister_mdio(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 	netif_napi_del(&priv->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 	pm_runtime_disable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 	free_netdev(priv->ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 
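/* Runtime PM: suspend gates the D/C/G clock domains and the external
 * PHY reference clock; resume re-enables them before the hardware is
 * touched again.
 */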
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) #ifdef CONFIG_PM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) static int netsec_runtime_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 	struct netsec_priv *priv = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 	netsec_write(priv, NETSEC_REG_CLK_EN, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 	clk_disable_unprepare(priv->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) static int netsec_runtime_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 	struct netsec_priv *priv = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 	clk_prepare_enable(priv->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 	netsec_write(priv, NETSEC_REG_CLK_EN, NETSEC_CLK_EN_REG_DOM_D |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 					       NETSEC_CLK_EN_REG_DOM_C |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 					       NETSEC_CLK_EN_REG_DOM_G);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) static const struct dev_pm_ops netsec_pm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 	SET_RUNTIME_PM_OPS(netsec_runtime_suspend, netsec_runtime_resume, NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) static const struct of_device_id netsec_dt_ids[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 	{ .compatible = "socionext,synquacer-netsec" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 	{ }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) MODULE_DEVICE_TABLE(of, netsec_dt_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) #ifdef CONFIG_ACPI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) static const struct acpi_device_id netsec_acpi_ids[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 	{ "SCX0001" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 	{ }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) MODULE_DEVICE_TABLE(acpi, netsec_acpi_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) static struct platform_driver netsec_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 	.probe	= netsec_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 	.remove	= netsec_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 	.driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 		.name = "netsec",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 		.pm = &netsec_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 		.of_match_table = netsec_dt_ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 		.acpi_match_table = ACPI_PTR(netsec_acpi_ids),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) module_platform_driver(netsec_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) MODULE_AUTHOR("Jassi Brar <jaswinder.singh@linaro.org>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) MODULE_DESCRIPTION("NETSEC Ethernet driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) MODULE_LICENSE("GPL");