Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 tree for the Orange Pi 5/5B/5+ boards

drivers/net/ethernet/ethoc.c (all lines from commit 8f3ce5b39, kx, 2023-10-28):
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/drivers/net/ethernet/ethoc.c
 *
 * Copyright (C) 2007-2008 Avionic Design Development GmbH
 * Copyright (C) 2008-2009 Avionic Design GmbH
 *
 * Written by Thierry Reding <thierry.reding@avionic-design.de>
 */

#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/clk.h>
#include <linux/crc32.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/module.h>
#include <net/ethoc.h>

static int buffer_size = 0x8000; /* 32 KBytes */
module_param(buffer_size, int, 0);
MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
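
/*
 * Usage note (illustrative): the permission argument of 0 hides the
 * parameter from sysfs, so the buffer size can only be overridden at
 * load time, e.g. "modprobe ethoc buffer_size=0x4000".
 */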

/* register offsets */
#define	MODER		0x00
#define	INT_SOURCE	0x04
#define	INT_MASK	0x08
#define	IPGT		0x0c
#define	IPGR1		0x10
#define	IPGR2		0x14
#define	PACKETLEN	0x18
#define	COLLCONF	0x1c
#define	TX_BD_NUM	0x20
#define	CTRLMODER	0x24
#define	MIIMODER	0x28
#define	MIICOMMAND	0x2c
#define	MIIADDRESS	0x30
#define	MIITX_DATA	0x34
#define	MIIRX_DATA	0x38
#define	MIISTATUS	0x3c
#define	MAC_ADDR0	0x40
#define	MAC_ADDR1	0x44
#define	ETH_HASH0	0x48
#define	ETH_HASH1	0x4c
#define	ETH_TXCTRL	0x50
#define	ETH_END		0x54

/* mode register */
#define	MODER_RXEN	(1 <<  0) /* receive enable */
#define	MODER_TXEN	(1 <<  1) /* transmit enable */
#define	MODER_NOPRE	(1 <<  2) /* no preamble */
#define	MODER_BRO	(1 <<  3) /* broadcast address */
#define	MODER_IAM	(1 <<  4) /* individual address mode */
#define	MODER_PRO	(1 <<  5) /* promiscuous mode */
#define	MODER_IFG	(1 <<  6) /* interframe gap for incoming frames */
#define	MODER_LOOP	(1 <<  7) /* loopback */
#define	MODER_NBO	(1 <<  8) /* no back-off */
#define	MODER_EDE	(1 <<  9) /* excess defer enable */
#define	MODER_FULLD	(1 << 10) /* full duplex */
#define	MODER_RESET	(1 << 11) /* FIXME: reset (undocumented) */
#define	MODER_DCRC	(1 << 12) /* delayed CRC enable */
#define	MODER_CRC	(1 << 13) /* CRC enable */
#define	MODER_HUGE	(1 << 14) /* huge packets enable */
#define	MODER_PAD	(1 << 15) /* padding enabled */
#define	MODER_RSM	(1 << 16) /* receive small packets */

/* interrupt source and mask registers */
#define	INT_MASK_TXF	(1 << 0) /* transmit frame */
#define	INT_MASK_TXE	(1 << 1) /* transmit error */
#define	INT_MASK_RXF	(1 << 2) /* receive frame */
#define	INT_MASK_RXE	(1 << 3) /* receive error */
#define	INT_MASK_BUSY	(1 << 4)
#define	INT_MASK_TXC	(1 << 5) /* transmit control frame */
#define	INT_MASK_RXC	(1 << 6) /* receive control frame */

#define	INT_MASK_TX	(INT_MASK_TXF | INT_MASK_TXE)
#define	INT_MASK_RX	(INT_MASK_RXF | INT_MASK_RXE)

#define	INT_MASK_ALL ( \
		INT_MASK_TXF | INT_MASK_TXE | \
		INT_MASK_RXF | INT_MASK_RXE | \
		INT_MASK_TXC | INT_MASK_RXC | \
		INT_MASK_BUSY \
	)

/* packet length register */
#define	PACKETLEN_MIN(min)		(((min) & 0xffff) << 16)
#define	PACKETLEN_MAX(max)		(((max) & 0xffff) <<  0)
#define	PACKETLEN_MIN_MAX(min, max)	(PACKETLEN_MIN(min) | \
					PACKETLEN_MAX(max))
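
/*
 * Worked example: PACKETLEN_MIN_MAX(64, 1536) evaluates to 0x00400600,
 * i.e. the minimum frame length (64) lands in the upper 16 bits and the
 * maximum (1536) in the lower 16 bits of the PACKETLEN register.
 */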

/* transmit buffer number register */
#define	TX_BD_NUM_VAL(x)	(((x) <= 0x80) ? (x) : 0x80)

/* control module mode register */
#define	CTRLMODER_PASSALL	(1 << 0) /* pass all receive frames */
#define	CTRLMODER_RXFLOW	(1 << 1) /* receive control flow */
#define	CTRLMODER_TXFLOW	(1 << 2) /* transmit control flow */

/* MII mode register */
#define	MIIMODER_CLKDIV(x)	((x) & 0xfe) /* needs to be an even number */
#define	MIIMODER_NOPRE		(1 << 8) /* no preamble */

/* MII command register */
#define	MIICOMMAND_SCAN		(1 << 0) /* scan status */
#define	MIICOMMAND_READ		(1 << 1) /* read status */
#define	MIICOMMAND_WRITE	(1 << 2) /* write control data */

/* MII address register */
#define	MIIADDRESS_FIAD(x)		(((x) & 0x1f) << 0)
#define	MIIADDRESS_RGAD(x)		(((x) & 0x1f) << 8)
#define	MIIADDRESS_ADDR(phy, reg)	(MIIADDRESS_FIAD(phy) | \
					MIIADDRESS_RGAD(reg))
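
/*
 * Worked example: MIIADDRESS_ADDR(1, 2) evaluates to 0x0201 -- PHY
 * address 1 in bits 4:0 (FIAD) and register number 2 in bits 12:8
 * (RGAD).
 */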

/* MII transmit data register */
#define	MIITX_DATA_VAL(x)	((x) & 0xffff)

/* MII receive data register */
#define	MIIRX_DATA_VAL(x)	((x) & 0xffff)

/* MII status register */
#define	MIISTATUS_LINKFAIL	(1 << 0)
#define	MIISTATUS_BUSY		(1 << 1)
#define	MIISTATUS_INVALID	(1 << 2)

/* TX buffer descriptor */
#define	TX_BD_CS		(1 <<  0) /* carrier sense lost */
#define	TX_BD_DF		(1 <<  1) /* defer indication */
#define	TX_BD_LC		(1 <<  2) /* late collision */
#define	TX_BD_RL		(1 <<  3) /* retransmission limit */
#define	TX_BD_RETRY_MASK	(0x00f0)
#define	TX_BD_RETRY(x)		(((x) & 0x00f0) >>  4)
#define	TX_BD_UR		(1 <<  8) /* transmitter underrun */
#define	TX_BD_CRC		(1 << 11) /* TX CRC enable */
#define	TX_BD_PAD		(1 << 12) /* pad enable for short packets */
#define	TX_BD_WRAP		(1 << 13)
#define	TX_BD_IRQ		(1 << 14) /* interrupt request enable */
#define	TX_BD_READY		(1 << 15) /* TX buffer ready */
#define	TX_BD_LEN(x)		(((x) & 0xffff) << 16)
#define	TX_BD_LEN_MASK		(0xffff << 16)

#define	TX_BD_STATS		(TX_BD_CS | TX_BD_DF | TX_BD_LC | \
				TX_BD_RL | TX_BD_RETRY_MASK | TX_BD_UR)

/* RX buffer descriptor */
#define	RX_BD_LC	(1 <<  0) /* late collision */
#define	RX_BD_CRC	(1 <<  1) /* RX CRC error */
#define	RX_BD_SF	(1 <<  2) /* short frame */
#define	RX_BD_TL	(1 <<  3) /* too long */
#define	RX_BD_DN	(1 <<  4) /* dribble nibble */
#define	RX_BD_IS	(1 <<  5) /* invalid symbol */
#define	RX_BD_OR	(1 <<  6) /* receiver overrun */
#define	RX_BD_MISS	(1 <<  7)
#define	RX_BD_CF	(1 <<  8) /* control frame */
#define	RX_BD_WRAP	(1 << 13)
#define	RX_BD_IRQ	(1 << 14) /* interrupt request enable */
#define	RX_BD_EMPTY	(1 << 15)
#define	RX_BD_LEN(x)	(((x) & 0xffff) << 16)

#define	RX_BD_STATS	(RX_BD_LC | RX_BD_CRC | RX_BD_SF | RX_BD_TL | \
			RX_BD_DN | RX_BD_IS | RX_BD_OR | RX_BD_MISS)

#define	ETHOC_BUFSIZ		1536
#define	ETHOC_ZLEN		64
#define	ETHOC_BD_BASE		0x400
#define	ETHOC_TIMEOUT		(HZ / 2)
#define	ETHOC_MII_TIMEOUT	(1 + (HZ / 5))
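
/*
 * With typical HZ values these amount to roughly half a second for
 * general operations and a little over 200 ms for MII transactions;
 * the "1 +" keeps the MII timeout at least one jiffy above the
 * truncated HZ / 5.
 */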

/**
 * struct ethoc - driver-private device structure
 * @iobase:	pointer to I/O memory region
 * @membase:	pointer to buffer memory region
 * @big_endian: true if the device's registers are big-endian
 * @num_bd:	number of buffer descriptors
 * @num_tx:	number of send buffers
 * @cur_tx:	last send buffer written
 * @dty_tx:	last buffer actually sent
 * @num_rx:	number of receive buffers
 * @cur_rx:	current receive buffer
 * @vma:        pointer to array of virtual memory addresses for buffers
 * @netdev:	pointer to network device structure
 * @napi:	NAPI structure
 * @msg_enable:	device state flags
 * @lock:	device lock
 * @mdio:	MDIO bus for PHY access
 * @clk:	clock
 * @phy_id:	address of attached PHY
 * @old_link:	previous link info
 * @old_duplex: previous duplex info
 */
struct ethoc {
	void __iomem *iobase;
	void __iomem *membase;
	bool big_endian;

	unsigned int num_bd;
	unsigned int num_tx;
	unsigned int cur_tx;
	unsigned int dty_tx;

	unsigned int num_rx;
	unsigned int cur_rx;

	void **vma;

	struct net_device *netdev;
	struct napi_struct napi;
	u32 msg_enable;

	spinlock_t lock;

	struct mii_bus *mdio;
	struct clk *clk;
	s8 phy_id;

	int old_link;
	int old_duplex;
};

/**
 * struct ethoc_bd - buffer descriptor
 * @stat:	buffer statistics
 * @addr:	physical memory address
 */
struct ethoc_bd {
	u32 stat;
	u32 addr;
};

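/*
 * All register accesses funnel through the two accessors below so a
 * single big_endian flag can select between ioread32be()/iowrite32be()
 * and their little-endian counterparts at run time.
 */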
static inline u32 ethoc_read(struct ethoc *dev, loff_t offset)
{
	if (dev->big_endian)
		return ioread32be(dev->iobase + offset);
	else
		return ioread32(dev->iobase + offset);
}

static inline void ethoc_write(struct ethoc *dev, loff_t offset, u32 data)
{
	if (dev->big_endian)
		iowrite32be(data, dev->iobase + offset);
	else
		iowrite32(data, dev->iobase + offset);
}

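/*
 * Buffer descriptors live in the device's register window starting at
 * ETHOC_BD_BASE (0x400); each descriptor is two 32-bit words (stat,
 * addr), hence the index * sizeof(struct ethoc_bd) offset below.
 */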
static inline void ethoc_read_bd(struct ethoc *dev, int index,
		struct ethoc_bd *bd)
{
	loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
	bd->stat = ethoc_read(dev, offset + 0);
	bd->addr = ethoc_read(dev, offset + 4);
}

static inline void ethoc_write_bd(struct ethoc *dev, int index,
		const struct ethoc_bd *bd)
{
	loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
	ethoc_write(dev, offset + 0, bd->stat);
	ethoc_write(dev, offset + 4, bd->addr);
}

static inline void ethoc_enable_irq(struct ethoc *dev, u32 mask)
{
	u32 imask = ethoc_read(dev, INT_MASK);
	imask |= mask;
	ethoc_write(dev, INT_MASK, imask);
}

static inline void ethoc_disable_irq(struct ethoc *dev, u32 mask)
{
	u32 imask = ethoc_read(dev, INT_MASK);
	imask &= ~mask;
	ethoc_write(dev, INT_MASK, imask);
}

static inline void ethoc_ack_irq(struct ethoc *dev, u32 mask)
{
	ethoc_write(dev, INT_SOURCE, mask);
}

static inline void ethoc_enable_rx_and_tx(struct ethoc *dev)
{
	u32 mode = ethoc_read(dev, MODER);
	mode |= MODER_RXEN | MODER_TXEN;
	ethoc_write(dev, MODER, mode);
}

static inline void ethoc_disable_rx_and_tx(struct ethoc *dev)
{
	u32 mode = ethoc_read(dev, MODER);
	mode &= ~(MODER_RXEN | MODER_TXEN);
	ethoc_write(dev, MODER, mode);
}

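/*
 * Ring layout as set up below: descriptors 0 .. num_tx - 1 are TX,
 * followed by num_rx RX descriptors. Packet buffers are consecutive
 * ETHOC_BUFSIZ slices carved out of the region at mem_start, and
 * dev->vma[] keeps the matching CPU-mapped addresses.
 */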
static int ethoc_init_ring(struct ethoc *dev, unsigned long mem_start)
{
	struct ethoc_bd bd;
	int i;
	void *vma;

	dev->cur_tx = 0;
	dev->dty_tx = 0;
	dev->cur_rx = 0;

	ethoc_write(dev, TX_BD_NUM, dev->num_tx);

	/* setup transmission buffers */
	bd.addr = mem_start;
	bd.stat = TX_BD_IRQ | TX_BD_CRC;
	vma = dev->membase;

	for (i = 0; i < dev->num_tx; i++) {
		if (i == dev->num_tx - 1)
			bd.stat |= TX_BD_WRAP;

		ethoc_write_bd(dev, i, &bd);
		bd.addr += ETHOC_BUFSIZ;

		dev->vma[i] = vma;
		vma += ETHOC_BUFSIZ;
	}

	bd.stat = RX_BD_EMPTY | RX_BD_IRQ;

	for (i = 0; i < dev->num_rx; i++) {
		if (i == dev->num_rx - 1)
			bd.stat |= RX_BD_WRAP;

		ethoc_write_bd(dev, dev->num_tx + i, &bd);
		bd.addr += ETHOC_BUFSIZ;

		dev->vma[dev->num_tx + i] = vma;
		vma += ETHOC_BUFSIZ;
	}

	return 0;
}

static int ethoc_reset(struct ethoc *dev)
{
	u32 mode;

	/* TODO: reset controller? */

	ethoc_disable_rx_and_tx(dev);

	/* TODO: setup registers */

	/* enable FCS generation and automatic padding */
	mode = ethoc_read(dev, MODER);
	mode |= MODER_CRC | MODER_PAD;
	ethoc_write(dev, MODER, mode);

	/* set full-duplex mode */
	mode = ethoc_read(dev, MODER);
	mode |= MODER_FULLD;
	ethoc_write(dev, MODER, mode);
	ethoc_write(dev, IPGT, 0x15);

	ethoc_ack_irq(dev, INT_MASK_ALL);
	ethoc_enable_irq(dev, INT_MASK_ALL);
	ethoc_enable_rx_and_tx(dev);
	return 0;
}

static unsigned int ethoc_update_rx_stats(struct ethoc *dev,
		struct ethoc_bd *bd)
{
	struct net_device *netdev = dev->netdev;
	unsigned int ret = 0;

	if (bd->stat & RX_BD_TL) {
		dev_err(&netdev->dev, "RX: frame too long\n");
		netdev->stats.rx_length_errors++;
		ret++;
	}

	if (bd->stat & RX_BD_SF) {
		dev_err(&netdev->dev, "RX: frame too short\n");
		netdev->stats.rx_length_errors++;
		ret++;
	}

	if (bd->stat & RX_BD_DN) {
		dev_err(&netdev->dev, "RX: dribble nibble\n");
		netdev->stats.rx_frame_errors++;
	}

	if (bd->stat & RX_BD_CRC) {
		dev_err(&netdev->dev, "RX: wrong CRC\n");
		netdev->stats.rx_crc_errors++;
		ret++;
	}

	if (bd->stat & RX_BD_OR) {
		dev_err(&netdev->dev, "RX: overrun\n");
		netdev->stats.rx_over_errors++;
		ret++;
	}

	if (bd->stat & RX_BD_MISS)
		netdev->stats.rx_missed_errors++;

	if (bd->stat & RX_BD_LC) {
		dev_err(&netdev->dev, "RX: late collision\n");
		netdev->stats.collisions++;
		ret++;
	}

	return ret;
}

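/*
 * RX path: the upper 16 bits of the descriptor status hold the frame
 * length including the 4-byte FCS, which is stripped before the data
 * is copied out of device memory into a freshly allocated skb.
 */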
static int ethoc_rx(struct net_device *dev, int limit)
{
	struct ethoc *priv = netdev_priv(dev);
	int count;

	for (count = 0; count < limit; ++count) {
		unsigned int entry;
		struct ethoc_bd bd;

		entry = priv->num_tx + priv->cur_rx;
		ethoc_read_bd(priv, entry, &bd);
		if (bd.stat & RX_BD_EMPTY) {
			ethoc_ack_irq(priv, INT_MASK_RX);
			/* If a packet (interrupt) came in between checking
			 * RX_BD_EMPTY and clearing the interrupt source, then
			 * we risk missing the packet as the RX interrupt won't
			 * trigger right away when we reenable it; hence, check
			 * RX_BD_EMPTY here again to make sure there isn't such
			 * a packet waiting for us...
			 */
			ethoc_read_bd(priv, entry, &bd);
			if (bd.stat & RX_BD_EMPTY)
				break;
		}

		if (ethoc_update_rx_stats(priv, &bd) == 0) {
			int size = bd.stat >> 16;
			struct sk_buff *skb;

			size -= 4; /* strip the CRC */
			skb = netdev_alloc_skb_ip_align(dev, size);

			if (likely(skb)) {
				void *src = priv->vma[entry];
				memcpy_fromio(skb_put(skb, size), src, size);
				skb->protocol = eth_type_trans(skb, dev);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += size;
				netif_receive_skb(skb);
			} else {
				if (net_ratelimit())
					dev_warn(&dev->dev,
					    "low on memory - packet dropped\n");

				dev->stats.rx_dropped++;
				break;
			}
		}

		/* clear the buffer descriptor so it can be reused */
		bd.stat &= ~RX_BD_STATS;
		bd.stat |=  RX_BD_EMPTY;
		ethoc_write_bd(priv, entry, &bd);
		if (++priv->cur_rx == priv->num_rx)
			priv->cur_rx = 0;
	}

	return count;
}

static void ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd)
{
	struct net_device *netdev = dev->netdev;

	if (bd->stat & TX_BD_LC) {
		dev_err(&netdev->dev, "TX: late collision\n");
		netdev->stats.tx_window_errors++;
	}

	if (bd->stat & TX_BD_RL) {
		dev_err(&netdev->dev, "TX: retransmit limit\n");
		netdev->stats.tx_aborted_errors++;
	}

	if (bd->stat & TX_BD_UR) {
		dev_err(&netdev->dev, "TX: underrun\n");
		netdev->stats.tx_fifo_errors++;
	}

	if (bd->stat & TX_BD_CS) {
		dev_err(&netdev->dev, "TX: carrier sense lost\n");
		netdev->stats.tx_carrier_errors++;
	}

	if (bd->stat & TX_BD_STATS)
		netdev->stats.tx_errors++;

	netdev->stats.collisions += (bd->stat >> 4) & 0xf;
	netdev->stats.tx_bytes += bd->stat >> 16;
	netdev->stats.tx_packets++;
}

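/*
 * TX reclaim: walk completed descriptors between dty_tx and cur_tx,
 * fold their status bits into the interface statistics, and wake the
 * queue once at least half of the TX ring is free again.
 */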
static int ethoc_tx(struct net_device *dev, int limit)
{
	struct ethoc *priv = netdev_priv(dev);
	int count;
	struct ethoc_bd bd;

	for (count = 0; count < limit; ++count) {
		unsigned int entry;

		entry = priv->dty_tx & (priv->num_tx-1);

		ethoc_read_bd(priv, entry, &bd);

		if (bd.stat & TX_BD_READY || (priv->dty_tx == priv->cur_tx)) {
			ethoc_ack_irq(priv, INT_MASK_TX);
			/* If an interrupt came in between reading in the BD
			 * and clearing the interrupt source, then we risk
			 * missing the event as the TX interrupt won't trigger
			 * right away when we reenable it; hence, check
			 * TX_BD_READY here again to make sure there isn't such
			 * an event pending...
			 */
			ethoc_read_bd(priv, entry, &bd);
			if (bd.stat & TX_BD_READY ||
			    (priv->dty_tx == priv->cur_tx))
				break;
		}

		ethoc_update_tx_stats(priv, &bd);
		priv->dty_tx++;
	}

	if ((priv->cur_tx - priv->dty_tx) <= (priv->num_tx / 2))
		netif_wake_queue(dev);

	return count;
}

static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct ethoc *priv = netdev_priv(dev);
	u32 pending;
	u32 mask;

	/* Figure out what triggered the interrupt...
	 * The tricky bit here is that the interrupt source bits get
	 * set in INT_SOURCE for an event regardless of whether that
	 * event is masked or not.  Thus, in order to figure out what
	 * triggered the interrupt, we need to remove the sources
	 * for all events that are currently masked.  This behaviour
	 * is not particularly well documented but reasonable...
	 */
	mask = ethoc_read(priv, INT_MASK);
	pending = ethoc_read(priv, INT_SOURCE);
	pending &= mask;

	if (unlikely(pending == 0))
		return IRQ_NONE;

	ethoc_ack_irq(priv, pending);

	/* We always handle the dropped packet interrupt */
	if (pending & INT_MASK_BUSY) {
		dev_dbg(&dev->dev, "packet dropped\n");
		dev->stats.rx_dropped++;
	}

	/* Handle receive/transmit event by switching to polling */
	if (pending & (INT_MASK_TX | INT_MASK_RX)) {
		ethoc_disable_irq(priv, INT_MASK_TX | INT_MASK_RX);
		napi_schedule(&priv->napi);
	}

	return IRQ_HANDLED;
}

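/*
 * Worked example of the register layout below: for the address
 * 00:11:22:33:44:55, MAC_ADDR1 reads back 0x0011 (bytes 0 and 1) and
 * MAC_ADDR0 reads back 0x22334455 (bytes 2 through 5).
 */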
static int ethoc_get_mac_address(struct net_device *dev, void *addr)
{
	struct ethoc *priv = netdev_priv(dev);
	u8 *mac = (u8 *)addr;
	u32 reg;

	reg = ethoc_read(priv, MAC_ADDR0);
	mac[2] = (reg >> 24) & 0xff;
	mac[3] = (reg >> 16) & 0xff;
	mac[4] = (reg >>  8) & 0xff;
	mac[5] = (reg >>  0) & 0xff;

	reg = ethoc_read(priv, MAC_ADDR1);
	mac[0] = (reg >>  8) & 0xff;
	mac[1] = (reg >>  0) & 0xff;

	return 0;
}

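/*
 * NAPI poll: RX and TX work are both bounded by the budget; TX/RX
 * interrupts stay disabled until a poll pass completes below budget,
 * at which point they are re-armed.
 */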
static int ethoc_poll(struct napi_struct *napi, int budget)
{
	struct ethoc *priv = container_of(napi, struct ethoc, napi);
	int rx_work_done = 0;
	int tx_work_done = 0;

	rx_work_done = ethoc_rx(priv->netdev, budget);
	tx_work_done = ethoc_tx(priv->netdev, budget);

	if (rx_work_done < budget && tx_work_done < budget) {
		napi_complete_done(napi, rx_work_done);
		ethoc_enable_irq(priv, INT_MASK_TX | INT_MASK_RX);
	}

	return rx_work_done;
}

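/*
 * MDIO access is polled: after issuing a command, MIISTATUS is sampled
 * up to five times with a 100-200 us sleep in between, so a transaction
 * gives up after roughly a millisecond with -EBUSY.
 */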
static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg)
{
	struct ethoc *priv = bus->priv;
	int i;

	ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg));
	ethoc_write(priv, MIICOMMAND, MIICOMMAND_READ);

	for (i = 0; i < 5; i++) {
		u32 status = ethoc_read(priv, MIISTATUS);
		if (!(status & MIISTATUS_BUSY)) {
			u32 data = ethoc_read(priv, MIIRX_DATA);
			/* reset MII command register */
			ethoc_write(priv, MIICOMMAND, 0);
			return data;
		}
		usleep_range(100, 200);
	}

	return -EBUSY;
}

static int ethoc_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
{
	struct ethoc *priv = bus->priv;
	int i;

	ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg));
	ethoc_write(priv, MIITX_DATA, val);
	ethoc_write(priv, MIICOMMAND, MIICOMMAND_WRITE);

	for (i = 0; i < 5; i++) {
		u32 stat = ethoc_read(priv, MIISTATUS);
		if (!(stat & MIISTATUS_BUSY)) {
			/* reset MII command register */
			ethoc_write(priv, MIICOMMAND, 0);
			return 0;
		}
		usleep_range(100, 200);
	}

	return -EBUSY;
}

static void ethoc_mdio_poll(struct net_device *dev)
{
	struct ethoc *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	bool changed = false;
	u32 mode;

	if (priv->old_link != phydev->link) {
		changed = true;
		priv->old_link = phydev->link;
	}

	if (priv->old_duplex != phydev->duplex) {
		changed = true;
		priv->old_duplex = phydev->duplex;
	}

	if (!changed)
		return;

	mode = ethoc_read(priv, MODER);
	if (phydev->duplex == DUPLEX_FULL)
		mode |= MODER_FULLD;
	else
		mode &= ~MODER_FULLD;
	ethoc_write(priv, MODER, mode);

	phy_print_status(phydev);
}

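/*
 * PHY discovery: a platform-supplied phy_id selects a specific bus
 * address, otherwise the first PHY found on the bus is used. Link and
 * duplex changes are delivered through the ethoc_mdio_poll callback
 * above.
 */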
static int ethoc_mdio_probe(struct net_device *dev)
{
	struct ethoc *priv = netdev_priv(dev);
	struct phy_device *phy;
	int err;

	if (priv->phy_id != -1)
		phy = mdiobus_get_phy(priv->mdio, priv->phy_id);
	else
		phy = phy_find_first(priv->mdio);

	if (!phy) {
		dev_err(&dev->dev, "no PHY found\n");
		return -ENXIO;
	}

	priv->old_duplex = -1;
	priv->old_link = -1;

	err = phy_connect_direct(dev, phy, ethoc_mdio_poll,
				 PHY_INTERFACE_MODE_GMII);
	if (err) {
		dev_err(&dev->dev, "could not attach to PHY\n");
		return err;
	}

	phy_set_max_speed(phy, SPEED_100);

	return 0;
}

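/*
 * Open sequence: claim the (potentially shared) interrupt line, enable
 * NAPI, rebuild the descriptor rings, reset the MAC, restart the TX
 * queue, and finally kick off the PHY state machine.
 */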
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) static int ethoc_open(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 	struct ethoc *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	ret = request_irq(dev->irq, ethoc_interrupt, IRQF_SHARED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 			dev->name, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 	napi_enable(&priv->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	ethoc_init_ring(priv, dev->mem_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	ethoc_reset(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	if (netif_queue_stopped(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 		dev_dbg(&dev->dev, " resuming queue\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 		netif_wake_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 		dev_dbg(&dev->dev, " starting queue\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 		netif_start_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 	priv->old_link = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	priv->old_duplex = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	phy_start(dev->phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	if (netif_msg_ifup(priv)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 		dev_info(&dev->dev, "I/O: %08lx Memory: %08lx-%08lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 				dev->base_addr, dev->mem_start, dev->mem_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) static int ethoc_stop(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	struct ethoc *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	napi_disable(&priv->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	if (dev->phydev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 		phy_stop(dev->phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	ethoc_disable_rx_and_tx(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	free_irq(dev->irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	if (!netif_queue_stopped(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 		netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) static int ethoc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	struct ethoc *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	struct mii_ioctl_data *mdio = if_mii(ifr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	struct phy_device *phy = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	if (!netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	if (cmd != SIOCGMIIPHY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 		if (mdio->phy_id >= PHY_MAX_ADDR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 			return -ERANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 		phy = mdiobus_get_phy(priv->mdio, mdio->phy_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 		if (!phy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 			return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 		phy = dev->phydev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	return phy_mii_ioctl(phy, ifr, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) }
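
/*
 * Minimal userspace sketch of this ioctl path (illustrative only: the
 * interface name "eth0" is an assumption and error checking is
 * omitted).  SIOCGMIIPHY fills in the PHY address, after which
 * SIOCGMIIREG can read arbitrary MII registers:
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <linux/mii.h>
 *	#include <linux/sockios.h>
 *
 *	struct ifreq ifr;
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ioctl(fd, SIOCGMIIPHY, &ifr);	// PHY address -> mii->phy_id
 *	mii->reg_num = MII_BMSR;
 *	ioctl(fd, SIOCGMIIREG, &ifr);	// BMSR value  -> mii->val_out
 */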
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) static void ethoc_do_set_mac_address(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	struct ethoc *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	unsigned char *mac = dev->dev_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	ethoc_write(priv, MAC_ADDR0, (mac[2] << 24) | (mac[3] << 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 				     (mac[4] <<  8) | (mac[5] <<  0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	ethoc_write(priv, MAC_ADDR1, (mac[0] <<  8) | (mac[1] <<  0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) }
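
/*
 * Worked example of the packing above: for the (made-up) address
 * 00:11:22:33:44:55,
 *
 *	MAC_ADDR0 = 0x22334455	(bytes 2..5)
 *	MAC_ADDR1 = 0x00000011	(bytes 0..1)
 *
 * so the two most significant bytes of the MAC live in MAC_ADDR1.
 */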
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) static int ethoc_set_mac_address(struct net_device *dev, void *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	const struct sockaddr *addr = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	if (!is_valid_ether_addr(addr->sa_data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 		return -EADDRNOTAVAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	ethoc_do_set_mac_address(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) static void ethoc_set_multicast_list(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	struct ethoc *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	u32 mode = ethoc_read(priv, MODER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	struct netdev_hw_addr *ha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	u32 hash[2] = { 0, 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	/* set loopback mode if requested */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	if (dev->flags & IFF_LOOPBACK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 		mode |=  MODER_LOOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 		mode &= ~MODER_LOOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	/* receive broadcast frames if requested */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	if (dev->flags & IFF_BROADCAST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 		mode &= ~MODER_BRO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 		mode |=  MODER_BRO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	/* enable promiscuous mode if requested */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	if (dev->flags & IFF_PROMISC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 		mode |=  MODER_PRO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 		mode &= ~MODER_PRO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	ethoc_write(priv, MODER, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	/* receive multicast frames */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	if (dev->flags & IFF_ALLMULTI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 		hash[0] = 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 		hash[1] = 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 		netdev_for_each_mc_addr(ha, dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 			u32 crc = ether_crc(ETH_ALEN, ha->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 			int bit = (crc >> 26) & 0x3f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 			hash[bit >> 5] |= 1 << (bit & 0x1f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	ethoc_write(priv, ETH_HASH0, hash[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	ethoc_write(priv, ETH_HASH1, hash[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) }
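
/*
 * Two things worth noting above: MODER_BRO is a "reject broadcast"
 * bit, hence the inverted IFF_BROADCAST test, and the 64-bit multicast
 * filter is indexed by the top six CRC bits.  Worked example with a
 * made-up CRC of 0x9c000000:
 *
 *	bit = (0x9c000000 >> 26) & 0x3f = 39
 *	hash[39 >> 5] |= 1 << (39 & 0x1f)	-> bit 7 of ETH_HASH1
 */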
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) static int ethoc_change_mtu(struct net_device *dev, int new_mtu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	return -ENOSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) static void ethoc_tx_timeout(struct net_device *dev, unsigned int txqueue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	struct ethoc *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	u32 pending = ethoc_read(priv, INT_SOURCE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	if (likely(pending))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 		ethoc_interrupt(dev->irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) }
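
/*
 * The watchdog does not reset the hardware: it just re-reads
 * INT_SOURCE and replays the interrupt handler, which recovers the
 * common case of a lost interrupt but assumes the MAC itself is still
 * healthy.
 */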
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	struct ethoc *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	struct ethoc_bd bd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	unsigned int entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	void *dest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	if (skb_put_padto(skb, ETHOC_ZLEN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 		dev->stats.tx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 		goto out_no_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	if (unlikely(skb->len > ETHOC_BUFSIZ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 		dev->stats.tx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	entry = priv->cur_tx % priv->num_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	spin_lock_irq(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	priv->cur_tx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	ethoc_read_bd(priv, entry, &bd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	if (unlikely(skb->len < ETHOC_ZLEN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 		bd.stat |=  TX_BD_PAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 		bd.stat &= ~TX_BD_PAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	dest = priv->vma[entry];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	memcpy_toio(dest, skb->data, skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	bd.stat &= ~(TX_BD_STATS | TX_BD_LEN_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	bd.stat |= TX_BD_LEN(skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	ethoc_write_bd(priv, entry, &bd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	bd.stat |= TX_BD_READY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	ethoc_write_bd(priv, entry, &bd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	if (priv->cur_tx == (priv->dty_tx + priv->num_tx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 		dev_dbg(&dev->dev, "stopping queue\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 		netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	spin_unlock_irq(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	skb_tx_timestamp(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) out_no_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) }
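
/*
 * The descriptor is written twice on purpose: length and status bits
 * are committed first, and TX_BD_READY is raised only in the second
 * write, so the MAC can never pick up a ready descriptor with stale
 * length bits.  The ring counts as full once cur_tx is num_tx slots
 * ahead of dty_tx; the queue then stays stopped until TX completions
 * advance dty_tx.
 */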
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) static int ethoc_get_regs_len(struct net_device *netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	return ETH_END;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) static void ethoc_get_regs(struct net_device *dev, struct ethtool_regs *regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 			   void *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	struct ethoc *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	u32 *regs_buff = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	regs->version = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	for (i = 0; i < ETH_END / sizeof(u32); ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 		regs_buff[i] = ethoc_read(priv, i * sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) }
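
/*
 * The blob handed back is simply the first ETH_END bytes of register
 * space, read as consecutive 32-bit words; "ethtool -d <iface>" is the
 * usual way to fetch it.
 */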
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) static void ethoc_get_ringparam(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 				struct ethtool_ringparam *ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	struct ethoc *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	ring->rx_max_pending = priv->num_bd - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	ring->rx_mini_max_pending = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	ring->rx_jumbo_max_pending = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	ring->tx_max_pending = priv->num_bd - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	ring->rx_pending = priv->num_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	ring->rx_mini_pending = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	ring->rx_jumbo_pending = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	ring->tx_pending = priv->num_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) static int ethoc_set_ringparam(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 			       struct ethtool_ringparam *ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	struct ethoc *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	if (ring->tx_pending < 1 || ring->rx_pending < 1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	    ring->tx_pending + ring->rx_pending > priv->num_bd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	if (netif_running(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 		netif_tx_disable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 		ethoc_disable_rx_and_tx(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		ethoc_disable_irq(priv, INT_MASK_TX | INT_MASK_RX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 		synchronize_irq(dev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	priv->num_tx = rounddown_pow_of_two(ring->tx_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	priv->num_rx = ring->rx_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	ethoc_init_ring(priv, dev->mem_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	if (netif_running(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 		ethoc_enable_irq(priv, INT_MASK_TX | INT_MASK_RX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 		ethoc_enable_rx_and_tx(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 		netif_wake_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) }
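
/*
 * From userspace these hooks are exercised with the standard ethtool
 * ring commands (the interface name is an assumption):
 *
 *	ethtool -g eth0		# show current and maximum ring sizes
 *	ethtool -G eth0 rx 8 tx 8
 *
 * Note that tx_pending is silently rounded down to a power of two, so
 * the TX ring actually installed can be smaller than requested.
 */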
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) static const struct ethtool_ops ethoc_ethtool_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	.get_regs_len = ethoc_get_regs_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	.get_regs = ethoc_get_regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	.nway_reset = phy_ethtool_nway_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	.get_link = ethtool_op_get_link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	.get_ringparam = ethoc_get_ringparam,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	.set_ringparam = ethoc_set_ringparam,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	.get_ts_info = ethtool_op_get_ts_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	.get_link_ksettings = phy_ethtool_get_link_ksettings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	.set_link_ksettings = phy_ethtool_set_link_ksettings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) static const struct net_device_ops ethoc_netdev_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	.ndo_open = ethoc_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	.ndo_stop = ethoc_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	.ndo_do_ioctl = ethoc_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	.ndo_set_mac_address = ethoc_set_mac_address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	.ndo_set_rx_mode = ethoc_set_multicast_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	.ndo_change_mtu = ethoc_change_mtu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	.ndo_tx_timeout = ethoc_tx_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	.ndo_start_xmit = ethoc_start_xmit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)  * ethoc_probe - initialize OpenCores ethernet MAC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)  * @pdev:	platform device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) static int ethoc_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	struct net_device *netdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	struct resource *res = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	struct resource *mmio = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	struct resource *mem = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	struct ethoc *priv = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	int num_bd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	struct ethoc_platform_data *pdata = dev_get_platdata(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	u32 eth_clkfreq = pdata ? pdata->eth_clkfreq : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	/* allocate networking device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	netdev = alloc_etherdev(sizeof(struct ethoc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	if (!netdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	SET_NETDEV_DEV(netdev, &pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	platform_set_drvdata(pdev, netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	/* obtain I/O memory space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	if (!res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 		dev_err(&pdev->dev, "cannot obtain I/O memory space\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 		ret = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 		goto free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	mmio = devm_request_mem_region(&pdev->dev, res->start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 			resource_size(res), res->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	if (!mmio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 		dev_err(&pdev->dev, "cannot request I/O memory space\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 		ret = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 		goto free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	netdev->base_addr = mmio->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	/* obtain buffer memory space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	if (res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 		mem = devm_request_mem_region(&pdev->dev, res->start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 			resource_size(res), res->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 		if (!mem) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 			dev_err(&pdev->dev, "cannot request memory space\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 			ret = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 			goto free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 		netdev->mem_start = mem->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 		netdev->mem_end   = mem->end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	/* obtain device IRQ number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	if (!res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 		dev_err(&pdev->dev, "cannot obtain IRQ\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 		ret = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 		goto free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	netdev->irq = res->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	/* setup driver-private data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	priv = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	priv->netdev = netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	priv->iobase = devm_ioremap(&pdev->dev, netdev->base_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 			resource_size(mmio));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	if (!priv->iobase) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 		dev_err(&pdev->dev, "cannot remap I/O memory space\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 		ret = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 		goto free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	if (netdev->mem_end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 		priv->membase = devm_ioremap(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 			netdev->mem_start, resource_size(mem));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 		if (!priv->membase) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 			dev_err(&pdev->dev, "cannot remap memory space\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 			ret = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 			goto free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 		/* Allocate buffer memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 		priv->membase = dmam_alloc_coherent(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 			buffer_size, (void *)&netdev->mem_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 			GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 		if (!priv->membase) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 			dev_err(&pdev->dev, "cannot allocate %dB buffer\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 				buffer_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 			ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 			goto free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 		netdev->mem_end = netdev->mem_start + buffer_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	priv->big_endian = pdata ? pdata->big_endian :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 		of_device_is_big_endian(pdev->dev.of_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	/* calculate the number of TX/RX buffers, maximum 128 supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	num_bd = min_t(unsigned int,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 		128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	if (num_bd < 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 		ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 		goto free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	priv->num_bd = num_bd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	/* num_tx must be a power of two */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	priv->num_tx = rounddown_pow_of_two(num_bd >> 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	priv->num_rx = num_bd - priv->num_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	dev_dbg(&pdev->dev, "ethoc: num_tx: %d num_rx: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 		priv->num_tx, priv->num_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	priv->vma = devm_kcalloc(&pdev->dev, num_bd, sizeof(void *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 				 GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 	if (!priv->vma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 		goto free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	/* Allow the platform setup code to pass in a MAC address. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	if (pdata) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 		ether_addr_copy(netdev->dev_addr, pdata->hwaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 		priv->phy_id = pdata->phy_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 		const void *mac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 		mac = of_get_mac_address(pdev->dev.of_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 		if (!IS_ERR(mac))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 			ether_addr_copy(netdev->dev_addr, mac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 		priv->phy_id = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	/* Check that the given MAC address is valid. If it isn't, read the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	 * current MAC from the controller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	if (!is_valid_ether_addr(netdev->dev_addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 		ethoc_get_mac_address(netdev, netdev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 	/* Check the MAC again for validity; if it still isn't valid, choose
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	 * and program a random one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	if (!is_valid_ether_addr(netdev->dev_addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 		eth_hw_addr_random(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 	ethoc_do_set_mac_address(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	/* Allow the platform setup code to adjust the MII management bus clock. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	if (!eth_clkfreq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 		struct clk *clk = devm_clk_get(&pdev->dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 		if (!IS_ERR(clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 			priv->clk = clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 			clk_prepare_enable(clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 			eth_clkfreq = clk_get_rate(clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	if (eth_clkfreq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 		u32 clkdiv = MIIMODER_CLKDIV(eth_clkfreq / 2500000 + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 		if (!clkdiv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 			clkdiv = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 		dev_dbg(&pdev->dev, "setting MII clkdiv to %u\n", clkdiv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 		ethoc_write(priv, MIIMODER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 			    (ethoc_read(priv, MIIMODER) & MIIMODER_NOPRE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 			    clkdiv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	/* register MII bus */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	priv->mdio = mdiobus_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	if (!priv->mdio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 		goto free2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	priv->mdio->name = "ethoc-mdio";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "%s-%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 			priv->mdio->name, pdev->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	priv->mdio->read = ethoc_mdio_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	priv->mdio->write = ethoc_mdio_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	priv->mdio->priv = priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	ret = mdiobus_register(priv->mdio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 		dev_err(&netdev->dev, "failed to register MDIO bus\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 		goto free3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	ret = ethoc_mdio_probe(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 		dev_err(&netdev->dev, "failed to probe MDIO bus\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 		goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	/* setup the net_device structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	netdev->netdev_ops = &ethoc_netdev_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	netdev->watchdog_timeo = ETHOC_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	netdev->features |= 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	netdev->ethtool_ops = &ethoc_ethtool_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	/* setup NAPI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	netif_napi_add(netdev, &priv->napi, ethoc_poll, 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	spin_lock_init(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	ret = register_netdev(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 		dev_err(&netdev->dev, "failed to register interface\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 		goto error2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) error2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	netif_napi_del(&priv->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	mdiobus_unregister(priv->mdio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) free3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	mdiobus_free(priv->mdio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) free2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	clk_disable_unprepare(priv->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	free_netdev(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) }
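
/*
 * The error unwind above mirrors acquisition order: "error2" drops the
 * NAPI context, "error" unregisters the MDIO bus, "free3" frees it,
 * "free2" disables the optional clock and "free" releases the
 * net_device.  Regions and mappings taken with devm_* helpers are
 * released automatically by the driver core.
 */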
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257)  * ethoc_remove - shutdown OpenCores ethernet MAC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)  * @pdev:	platform device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) static int ethoc_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	struct net_device *netdev = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	struct ethoc *priv = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	if (netdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 		netif_napi_del(&priv->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 		phy_disconnect(netdev->phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 		if (priv->mdio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 			mdiobus_unregister(priv->mdio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 			mdiobus_free(priv->mdio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 		clk_disable_unprepare(priv->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 		unregister_netdev(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 		free_netdev(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) #ifdef CONFIG_PM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) static int ethoc_suspend(struct platform_device *pdev, pm_message_t state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	return -ENOSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) static int ethoc_resume(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	return -ENOSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) # define ethoc_suspend NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) # define ethoc_resume  NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) #endif
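
/*
 * With CONFIG_PM set, the stubs above actively refuse system suspend
 * and resume with -ENOSYS; without it, no PM callbacks are registered
 * at all.  Either way the driver implements no power management.
 */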
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) static const struct of_device_id ethoc_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	{ .compatible = "opencores,ethoc", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	{},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) MODULE_DEVICE_TABLE(of, ethoc_match);
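
/*
 * A matching device tree node might look like this sketch (addresses,
 * sizes and the interrupt number are assumptions; the second reg entry
 * describing packet buffer memory is optional, as probe falls back to
 * a coherent DMA allocation when it is absent):
 *
 *	ethernet@92000000 {
 *		compatible = "opencores,ethoc";
 *		reg = <0x92000000 0x800>, <0x91000000 0x8000>;
 *		interrupts = <1>;
 *	};
 */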
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) static struct platform_driver ethoc_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	.probe   = ethoc_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	.remove  = ethoc_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	.suspend = ethoc_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	.resume  = ethoc_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	.driver  = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 		.name = "ethoc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 		.of_match_table = ethoc_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) module_platform_driver(ethoc_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) MODULE_DESCRIPTION("OpenCores Ethernet MAC driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) MODULE_LICENSE("GPL v2");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)